author     Jeremy Koritzinsky <jekoritz@microsoft.com>  2024-03-01 12:07:24 -0800
committer  Jeremy Koritzinsky <jekoritz@microsoft.com>  2024-03-01 12:08:14 -0800
commit     f861a1ce6e69de826c5419c60c658efa2b903662 (patch)
tree       0ab4d31fa71c2fef3e841eed2f0ade501f877004
parent     c9f3f67bfef74fe3bcb4ca938a43e0faa8bc5db1 (diff)
parent     a2e4177bb55f192483a9f9477a79621338f9c58a (diff)

Merge branch 'main' of github.com:dotnet/runtime into pr-single-job

This should enable the XUnitLogChecker deployment for the libraries tests.
-rw-r--r--  .devcontainer/Dockerfile  3
-rw-r--r--  .github/CODEOWNERS  6
-rw-r--r--  .github/ISSUE_TEMPLATE/04_blank_issue.md (renamed from .github/ISSUE_TEMPLATE/05_blank_issue.md)  0
-rw-r--r--  .github/ISSUE_TEMPLATE/04_ci_known_issue.yml  32
-rw-r--r--  .github/ISSUE_TEMPLATE/config.yml  3
-rw-r--r--  Directory.Build.props  2
-rw-r--r--  docs/area-owners.md  8
-rw-r--r--  docs/design/coreclr/botr/guide-for-porting.md  6
-rw-r--r--  docs/design/coreclr/jit/first-class-structs.md  4
-rw-r--r--  docs/design/coreclr/jit/ryujit-overview.md  6
-rw-r--r--  docs/design/coreclr/jit/ryujit-tutorial.md  4
-rw-r--r--  docs/workflow/ci/failure-analysis.md  24
-rw-r--r--  eng/DotNetBuild.props  4
-rw-r--r--  eng/Subsets.props  2
-rw-r--r--  eng/Version.Details.xml  177
-rw-r--r--  eng/Versions.props  68
-rw-r--r--  eng/build.ps1  3
-rwxr-xr-x  eng/build.sh  3
-rw-r--r--  eng/common/templates-official/job/job.yml  255
-rw-r--r--  eng/common/templates-official/job/onelocbuild.yml  112
-rw-r--r--  eng/common/templates-official/job/publish-build-assets.yml  157
-rw-r--r--  eng/common/templates-official/job/source-build.yml  67
-rw-r--r--  eng/common/templates-official/job/source-index-stage1.yml  67
-rw-r--r--  eng/common/templates-official/jobs/codeql-build.yml  31
-rw-r--r--  eng/common/templates-official/jobs/jobs.yml  97
-rw-r--r--  eng/common/templates-official/jobs/source-build.yml  46
-rw-r--r--  eng/common/templates-official/post-build/common-variables.yml  24
-rw-r--r--  eng/common/templates-official/post-build/post-build.yml  285
-rw-r--r--  eng/common/templates-official/post-build/setup-maestro-vars.yml  70
-rw-r--r--  eng/common/templates-official/post-build/trigger-subscription.yml  13
-rw-r--r--  eng/common/templates-official/steps/add-build-to-channel.yml  13
-rw-r--r--  eng/common/templates-official/steps/component-governance.yml  13
-rw-r--r--  eng/common/templates-official/steps/generate-sbom.yml  48
-rw-r--r--  eng/common/templates-official/steps/publish-logs.yml  49
-rw-r--r--  eng/common/templates-official/steps/retain-build.yml  28
-rw-r--r--  eng/common/templates-official/steps/send-to-helix.yml  91
-rw-r--r--  eng/common/templates-official/steps/source-build.yml  129
-rw-r--r--  eng/common/templates-official/variables/pool-providers.yml  45
-rw-r--r--  eng/common/templates-official/variables/sdl-variables.yml  7
-rw-r--r--  eng/common/tools.ps1  8
-rwxr-xr-x  eng/common/tools.sh  8
-rw-r--r--  eng/pipelines/common/templates/runtimes/run-test-job.yml  2
-rw-r--r--  eng/pipelines/common/xplat-setup.yml  6
-rw-r--r--  eng/pipelines/coreclr/libraries-pgo.yml  2
-rw-r--r--  eng/pipelines/coreclr/perf-non-wasm-jobs.yml  18
-rw-r--r--  eng/pipelines/coreclr/runtime-nativeaot-outerloop.yml  14
-rw-r--r--  eng/pipelines/coreclr/templates/helix-queues-setup.yml  4
-rw-r--r--  eng/pipelines/runtime.yml  8
-rw-r--r--  eng/testing/ChromeVersions.props  16
-rw-r--r--  eng/testing/scenarios/BuildWasmAppsJobsList.txt  3
-rw-r--r--  global.json  12
-rw-r--r--  src/coreclr/System.Private.CoreLib/System.Private.CoreLib.csproj  2
-rw-r--r--  src/coreclr/System.Private.CoreLib/src/Internal/Runtime/InteropServices/ComActivator.cs  1
-rw-r--r--  src/coreclr/System.Private.CoreLib/src/System/Array.CoreCLR.cs  4
-rw-r--r--  src/coreclr/System.Private.CoreLib/src/System/Object.CoreCLR.cs  7
-rw-r--r--  src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/DynamicILGenerator.cs  2
-rw-r--r--  src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimeAssembly.cs  18
-rw-r--r--  src/coreclr/System.Private.CoreLib/src/System/Resources/ManifestBasedResourceGroveler.CoreCLR.cs  19
-rw-r--r--  src/coreclr/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.CoreCLR.cs  30
-rw-r--r--  src/coreclr/System.Private.CoreLib/src/System/Runtime/ExceptionServices/InternalCalls.cs  2
-rw-r--r--  src/coreclr/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshal.CoreCLR.cs  4
-rw-r--r--  src/coreclr/System.Private.CoreLib/src/System/RuntimeType.CoreCLR.cs  7
-rw-r--r--  src/coreclr/System.Private.CoreLib/src/System/String.CoreCLR.cs  2
-rw-r--r--  src/coreclr/System.Private.CoreLib/src/System/StubHelpers.cs  10
-rw-r--r--  src/coreclr/System.Private.CoreLib/src/System/Threading/WaitHandle.CoreCLR.cs  2
-rw-r--r--  src/coreclr/System.Private.CoreLib/src/System/ValueType.cs  11
-rw-r--r--  src/coreclr/classlibnative/bcltype/objectnative.cpp  80
-rw-r--r--  src/coreclr/classlibnative/bcltype/objectnative.h  4
-rw-r--r--  src/coreclr/classlibnative/bcltype/system.cpp  28
-rw-r--r--  src/coreclr/classlibnative/bcltype/system.h  3
-rw-r--r--  src/coreclr/clrdefinitions.cmake  6
-rw-r--r--  src/coreclr/crosscomponents.cmake  7
-rw-r--r--  src/coreclr/debug/daccess/dacdbiimpl.cpp  4
-rw-r--r--  src/coreclr/debug/daccess/request.cpp  6
-rw-r--r--  src/coreclr/debug/ee/controller.h  2
-rw-r--r--  src/coreclr/debug/inc/arm64/primitives.h  6
-rw-r--r--  src/coreclr/debug/inc/loongarch64/primitives.h  4
-rw-r--r--  src/coreclr/debug/inc/riscv64/primitives.h  4
-rw-r--r--  src/coreclr/gc/env/common.h  1
-rw-r--r--  src/coreclr/gc/gc.cpp  610
-rw-r--r--  src/coreclr/gc/gcpriv.h  159
-rw-r--r--  src/coreclr/inc/check.h  12
-rw-r--r--  src/coreclr/inc/check.inl  40
-rw-r--r--  src/coreclr/inc/clr_std/type_traits  4
-rw-r--r--  src/coreclr/inc/clrconfignocache.h  2
-rw-r--r--  src/coreclr/inc/clrconfigvalues.h  2
-rw-r--r--  src/coreclr/inc/clrnt.h  7
-rw-r--r--  src/coreclr/inc/clrtypes.h  9
-rw-r--r--  src/coreclr/inc/corinfo.h  3
-rw-r--r--  src/coreclr/inc/daccess.h  5
-rw-r--r--  src/coreclr/inc/gcinfo.h  9
-rw-r--r--  src/coreclr/inc/jiteeversionguid.h  10
-rw-r--r--  src/coreclr/inc/jithelpers.h  11
-rw-r--r--  src/coreclr/inc/readytorun.h  12
-rw-r--r--  src/coreclr/inc/readytorunhelpers.h  2
-rw-r--r--  src/coreclr/inc/regdisp.h  2
-rw-r--r--  src/coreclr/jit/CMakeLists.txt  18
-rw-r--r--  src/coreclr/jit/assertionprop.cpp  3
-rw-r--r--  src/coreclr/jit/block.cpp  129
-rw-r--r--  src/coreclr/jit/block.h  777
-rw-r--r--  src/coreclr/jit/clrjit.natvis  7
-rw-r--r--  src/coreclr/jit/codegen.h  4
-rw-r--r--  src/coreclr/jit/codegenarm.cpp  25
-rw-r--r--  src/coreclr/jit/codegenarm64.cpp  28
-rw-r--r--  src/coreclr/jit/codegenarm64test.cpp  809
-rw-r--r--  src/coreclr/jit/codegenarmarch.cpp  22
-rw-r--r--  src/coreclr/jit/codegencommon.cpp  75
-rw-r--r--  src/coreclr/jit/codegenlinear.cpp  21
-rw-r--r--  src/coreclr/jit/codegenloongarch64.cpp  33
-rw-r--r--  src/coreclr/jit/codegenriscv64.cpp  32
-rw-r--r--  src/coreclr/jit/codegenxarch.cpp  55
-rw-r--r--  src/coreclr/jit/compiler.cpp  38
-rw-r--r--  src/coreclr/jit/compiler.h  81
-rw-r--r--  src/coreclr/jit/compiler.hpp  40
-rw-r--r--  src/coreclr/jit/compmemkind.h  1
-rw-r--r--  src/coreclr/jit/compphases.h  1
-rw-r--r--  src/coreclr/jit/emit.cpp  3
-rw-r--r--  src/coreclr/jit/emitarm64.cpp  2385
-rw-r--r--  src/coreclr/jit/emitarm64.h  110
-rw-r--r--  src/coreclr/jit/emitloongarch64.cpp  156
-rw-r--r--  src/coreclr/jit/emitriscv64.cpp  6
-rw-r--r--  src/coreclr/jit/emitxarch.cpp  9
-rw-r--r--  src/coreclr/jit/fgbasic.cpp  271
-rw-r--r--  src/coreclr/jit/fgdiagnostic.cpp  7
-rw-r--r--  src/coreclr/jit/fgehopt.cpp  63
-rw-r--r--  src/coreclr/jit/fgflow.cpp  110
-rw-r--r--  src/coreclr/jit/fginline.cpp  18
-rw-r--r--  src/coreclr/jit/fgopt.cpp  207
-rw-r--r--  src/coreclr/jit/fgprofile.cpp  171
-rw-r--r--  src/coreclr/jit/fgprofilesynthesis.cpp  177
-rw-r--r--  src/coreclr/jit/fgprofilesynthesis.h  1
-rw-r--r--  src/coreclr/jit/flowgraph.cpp  66
-rw-r--r--  src/coreclr/jit/gcencode.cpp  6
-rw-r--r--  src/coreclr/jit/gentree.cpp  217
-rw-r--r--  src/coreclr/jit/gentree.h  80
-rw-r--r--  src/coreclr/jit/gtlist.h  2
-rw-r--r--  src/coreclr/jit/gtstructs.h  5
-rw-r--r--  src/coreclr/jit/helperexpansion.cpp  297
-rw-r--r--  src/coreclr/jit/ifconversion.cpp  2
-rw-r--r--  src/coreclr/jit/importer.cpp  394
-rw-r--r--  src/coreclr/jit/importercalls.cpp  197
-rw-r--r--  src/coreclr/jit/indirectcalltransformer.cpp  168
-rw-r--r--  src/coreclr/jit/inductionvariableopts.cpp  686
-rw-r--r--  src/coreclr/jit/instr.h  2
-rw-r--r--  src/coreclr/jit/jitconfigvalues.h  6
-rw-r--r--  src/coreclr/jit/jiteh.cpp  26
-rw-r--r--  src/coreclr/jit/jitmetadatalist.h  2
-rw-r--r--  src/coreclr/jit/liveness.cpp  2
-rw-r--r--  src/coreclr/jit/loopcloning.cpp  52
-rw-r--r--  src/coreclr/jit/lower.cpp  308
-rw-r--r--  src/coreclr/jit/lower.h  1
-rw-r--r--  src/coreclr/jit/lowerarmarch.cpp  7
-rw-r--r--  src/coreclr/jit/lowerloongarch64.cpp  7
-rw-r--r--  src/coreclr/jit/lowerriscv64.cpp  6
-rw-r--r--  src/coreclr/jit/lowerxarch.cpp  6
-rw-r--r--  src/coreclr/jit/lsra.cpp  17
-rw-r--r--  src/coreclr/jit/lsraarm.cpp  1
-rw-r--r--  src/coreclr/jit/lsraarm64.cpp  39
-rw-r--r--  src/coreclr/jit/lsraarmarch.cpp  32
-rw-r--r--  src/coreclr/jit/lsrabuild.cpp  16
-rw-r--r--  src/coreclr/jit/lsraloongarch64.cpp  9
-rw-r--r--  src/coreclr/jit/lsrariscv64.cpp  9
-rw-r--r--  src/coreclr/jit/lsraxarch.cpp  47
-rw-r--r--  src/coreclr/jit/morph.cpp  159
-rw-r--r--  src/coreclr/jit/morphblock.cpp  67
-rw-r--r--  src/coreclr/jit/namedintrinsiclist.h  4
-rw-r--r--  src/coreclr/jit/optcse.cpp  124
-rw-r--r--  src/coreclr/jit/optcse.h  6
-rw-r--r--  src/coreclr/jit/optimizebools.cpp  58
-rw-r--r--  src/coreclr/jit/optimizer.cpp  82
-rw-r--r--  src/coreclr/jit/patchpoint.cpp  16
-rw-r--r--  src/coreclr/jit/scev.cpp  959
-rw-r--r--  src/coreclr/jit/scev.h  222
-rw-r--r--  src/coreclr/jit/sideeffects.cpp  2
-rw-r--r--  src/coreclr/jit/simd.cpp  2
-rw-r--r--  src/coreclr/jit/switchrecognition.cpp  19
-rw-r--r--  src/coreclr/jit/targetamd64.h  5
-rw-r--r--  src/coreclr/jit/targetarm64.h  5
-rw-r--r--  src/coreclr/jit/valuenum.cpp  39
-rw-r--r--  src/coreclr/nativeaot/Common/src/Internal/Runtime/CompilerHelpers/StartupCodeHelpers.cs  7
-rw-r--r--  src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifier.cs  2
-rw-r--r--  src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifierW.cs  2
-rw-r--r--  src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifierWKeyed.cs  2
-rw-r--r--  src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/CompilerServices/Unsafe.cs  10
-rw-r--r--  src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/ExceptionHandling.cs  35
-rw-r--r--  src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs  2
-rw-r--r--  src/coreclr/nativeaot/Runtime/CMakeLists.txt  6
-rw-r--r--  src/coreclr/nativeaot/Runtime/EHHelpers.cpp  16
-rw-r--r--  src/coreclr/nativeaot/Runtime/MathHelpers.cpp  5
-rw-r--r--  src/coreclr/nativeaot/Runtime/arm64/AllocFast.S  4
-rw-r--r--  src/coreclr/nativeaot/Runtime/arm64/GcProbe.S  3
-rw-r--r--  src/coreclr/nativeaot/Runtime/inc/ModuleHeaders.h  2
-rw-r--r--  src/coreclr/nativeaot/Runtime/windows/PalRedhawkMinWin.cpp  2
-rw-r--r--  src/coreclr/nativeaot/System.Private.CoreLib/src/CompatibilitySuppressions.xml  4
-rw-r--r--  src/coreclr/nativeaot/System.Private.CoreLib/src/Internal/Runtime/FrozenObjectHeapManager.cs  10
-rw-r--r--  src/coreclr/nativeaot/System.Private.CoreLib/src/System.Private.CoreLib.csproj  1
-rw-r--r--  src/coreclr/nativeaot/System.Private.CoreLib/src/System/Array.NativeAot.cs  4
-rw-r--r--  src/coreclr/nativeaot/System.Private.CoreLib/src/System/Object.NativeAot.cs  2
-rw-r--r--  src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/Runtime/MethodInfos/CustomMethodInvoker.cs  2
-rw-r--r--  src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/RuntimeAssembly.cs  24
-rw-r--r--  src/coreclr/nativeaot/System.Private.CoreLib/src/System/Resources/ManifestBasedResourceGroveler.NativeAot.cs  41
-rw-r--r--  src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/CompilerServices/ClassConstructorRunner.cs  6
-rw-r--r--  src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/InteropServices/ComWrappers.NativeAot.cs  2
-rw-r--r--  src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshal.NativeAot.cs  4
-rw-r--r--  src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Condition.cs  1
-rw-r--r--  src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Lock.NativeAot.cs  27
-rw-r--r--  src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/SyncTable.cs  2
-rw-r--r--  src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Thread.NativeAot.Windows.cs  2
-rw-r--r--  src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Thread.NativeAot.cs  2
-rw-r--r--  src/coreclr/nativeaot/System.Private.CoreLib/src/System/Type.NativeAot.cs  9
-rw-r--r--  src/coreclr/nativeaot/System.Private.CoreLib/src/System/ValueType.cs  45
-rw-r--r--  src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.ConstructedGenericsRegistration.cs  2
-rw-r--r--  src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.StaticsLookup.cs  2
-rw-r--r--  src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.cs  2
-rw-r--r--  src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeSystemContextFactory.cs  2
-rw-r--r--  src/coreclr/runtime.proj  1
-rwxr-xr-x  src/coreclr/scripts/emitUnitTests.sh  1
-rw-r--r--  src/coreclr/scripts/superpmi.py  32
-rw-r--r--  src/coreclr/tools/Common/Compiler/DependencyAnalysis/Target_X86/X86Emitter.cs  73
-rw-r--r--  src/coreclr/tools/Common/Compiler/InstructionSetSupport.cs  12
-rw-r--r--  src/coreclr/tools/Common/InstructionSetHelpers.cs  51
-rw-r--r--  src/coreclr/tools/Common/Internal/Runtime/ModuleHeaders.cs  2
-rw-r--r--  src/coreclr/tools/Common/Internal/Runtime/ReadyToRunConstants.cs  4
-rw-r--r--  src/coreclr/tools/Common/JitInterface/CorInfoHelpFunc.cs  3
-rw-r--r--  src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs  3
-rw-r--r--  src/coreclr/tools/Common/JitInterface/JitConfigProvider.cs  14
-rw-r--r--  src/coreclr/tools/Common/JitInterface/LoongArch64PassStructInRegister.cs  11
-rw-r--r--  src/coreclr/tools/Common/TypeSystem/Common/ArrayType.cs  15
-rw-r--r--  src/coreclr/tools/Common/TypeSystem/IL/Stubs/ArrayMethodILEmitter.cs  8
-rw-r--r--  src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/CompilerTypeSystemContext.BoxedTypes.cs  32
-rw-r--r--  src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/CompilerTypeSystemContext.InterfaceThunks.cs  34
-rw-r--r--  src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/ArrayOfFrozenObjectsNode.cs  2
-rw-r--r--  src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/GenericLookupResult.cs  36
-rw-r--r--  src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/Target_X86/X86ReadyToRunGenericHelperNode.cs  241
-rw-r--r--  src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/Target_X86/X86ReadyToRunHelperNode.cs  127
-rw-r--r--  src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/JitHelper.cs  18
-rw-r--r--  src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/ObjectWriter/CoffObjectWriter.cs  4
-rw-r--r--  src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/SubstitutedILProvider.cs  6
-rw-r--r--  src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/TypePreinit.cs  13
-rw-r--r--  src/coreclr/tools/aot/ILCompiler.ReadyToRun/JitInterface/CorInfoImpl.ReadyToRun.cs  6
-rw-r--r--  src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/ReadyToRunSignature.cs  8
-rw-r--r--  src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs  6
-rw-r--r--  src/coreclr/tools/aot/ILCompiler/repro/repro.csproj  4
-rw-r--r--  src/coreclr/tools/superpmi/superpmi-shared/compileresult.cpp  31
-rw-r--r--  src/coreclr/vm/CMakeLists.txt  7
-rw-r--r--  src/coreclr/vm/amd64/CrtHelpers.asm  79
-rw-r--r--  src/coreclr/vm/amd64/cgenamd64.cpp  2
-rw-r--r--  src/coreclr/vm/amd64/crthelpers.S  74
-rw-r--r--  src/coreclr/vm/amd64/gmsamd64.cpp  2
-rw-r--r--  src/coreclr/vm/appdomain.cpp  2
-rw-r--r--  src/coreclr/vm/arm/crthelpers.S  51
-rw-r--r--  src/coreclr/vm/arm/stubs.cpp  2
-rw-r--r--  src/coreclr/vm/arm64/crthelpers.S  33
-rw-r--r--  src/coreclr/vm/arm64/crthelpers.asm  81
-rw-r--r--  src/coreclr/vm/arm64/stubs.cpp  2
-rw-r--r--  src/coreclr/vm/ceeload.cpp  170
-rw-r--r--  src/coreclr/vm/ceeload.h  2
-rw-r--r--  src/coreclr/vm/ceeload.inl  2
-rw-r--r--  src/coreclr/vm/clrtocomcall.cpp  2
-rw-r--r--  src/coreclr/vm/commtmemberinfomap.cpp  2
-rw-r--r--  src/coreclr/vm/comutilnative.cpp  19
-rw-r--r--  src/coreclr/vm/comutilnative.h  2
-rw-r--r--  src/coreclr/vm/comwaithandle.cpp  5
-rw-r--r--  src/coreclr/vm/comwaithandle.h  2
-rw-r--r--  src/coreclr/vm/corelib.h  5
-rw-r--r--  src/coreclr/vm/debugdebugger.cpp  15
-rw-r--r--  src/coreclr/vm/dispatchinfo.cpp  5
-rw-r--r--  src/coreclr/vm/dllimport.cpp  3
-rw-r--r--  src/coreclr/vm/ecall.cpp  14
-rw-r--r--  src/coreclr/vm/ecall.h  2
-rw-r--r--  src/coreclr/vm/ecalllist.h  3
-rw-r--r--  src/coreclr/vm/eetwain.cpp  37
-rw-r--r--  src/coreclr/vm/excep.cpp  10
-rw-r--r--  src/coreclr/vm/exceptionhandling.cpp  49
-rw-r--r--  src/coreclr/vm/exceptionhandlingqcalls.h  2
-rw-r--r--  src/coreclr/vm/interoputil.cpp  2
-rw-r--r--  src/coreclr/vm/jithelpers.cpp  8
-rw-r--r--  src/coreclr/vm/jitinterface.cpp  5
-rw-r--r--  src/coreclr/vm/jitinterface.h  3
-rw-r--r--  src/coreclr/vm/loongarch64/crthelpers.S  37
-rw-r--r--  src/coreclr/vm/loongarch64/stubs.cpp  2
-rw-r--r--  src/coreclr/vm/metasig.h  3
-rw-r--r--  src/coreclr/vm/profdetach.cpp  2
-rw-r--r--  src/coreclr/vm/qcallentrypoints.cpp  1
-rw-r--r--  src/coreclr/vm/riscv64/crthelpers.S  36
-rw-r--r--  src/coreclr/vm/threads.cpp  7
-rw-r--r--  src/installer/Directory.Build.targets  2
-rw-r--r--  src/installer/managed/Microsoft.NET.HostModel/Microsoft.NET.HostModel.csproj  2
-rw-r--r--  src/installer/pkg/sfx/Directory.Build.props  2
-rw-r--r--  src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Crossgen2.sfxproj  5
-rw-r--r--  src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Host.sfxproj  2
-rw-r--r--  src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Ref.sfxproj  2
-rw-r--r--  src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Runtime.sfxproj  2
-rw-r--r--  src/installer/pkg/sfx/bundle/Microsoft.NETCore.App.Bundle.bundleproj  2
-rw-r--r--  src/installer/prepare-artifacts.proj  12
-rw-r--r--  src/libraries/Common/src/Interop/OSX/System.Security.Cryptography.Native.Apple/Interop.SecKeyRef.macOS.cs  3
-rw-r--r--  src/libraries/Common/src/Microsoft/Win32/SafeHandles/SafeUnicodeStringHandle.cs  7
-rw-r--r--  src/libraries/Common/src/System/Net/Http/aspnetcore/Http2/Hpack/Huffman.cs  2
-rw-r--r--  src/libraries/Common/src/System/Net/Security/CertificateValidation.OSX.cs  27
-rw-r--r--  src/libraries/Common/src/System/Net/Security/CertificateValidation.Unix.cs  2
-rw-r--r--  src/libraries/Common/src/System/Net/Security/CertificateValidation.Windows.cs  2
-rw-r--r--  src/libraries/Common/src/System/Number.Parsing.Common.cs  2
-rw-r--r--  src/libraries/Common/src/System/Security/Cryptography/Pkcs12Kdf.cs  10
-rw-r--r--  src/libraries/Common/tests/System/Net/Configuration.Sockets.cs  19
-rw-r--r--  src/libraries/Microsoft.Extensions.DependencyInjection/src/ServiceLookup/CallSiteValidator.cs  34
-rw-r--r--  src/libraries/Microsoft.Extensions.DependencyInjection/tests/DI.Tests/ServiceProviderValidationTests.cs  85
-rw-r--r--  src/libraries/System.Collections/src/System/Collections/BitArray.cs  20
-rw-r--r--  src/libraries/System.Collections/src/System/Collections/Generic/PriorityQueue.cs  7
-rw-r--r--  src/libraries/System.Collections/tests/BitArray/BitArray_GetSetTests.cs  24
-rw-r--r--  src/libraries/System.Console/src/System/ConsolePal.Unix.cs  25
-rw-r--r--  src/libraries/System.Diagnostics.DiagnosticSource/src/DiagnosticSourceUsersGuide.md  17
-rw-r--r--  src/libraries/System.Diagnostics.DiagnosticSource/src/System/Diagnostics/Metrics/MetricsEventSource.cs  6
-rw-r--r--  src/libraries/System.Diagnostics.DiagnosticSource/tests/MetricEventSourceTests.cs  119
-rw-r--r--  src/libraries/System.IO.FileSystem.Watcher/src/System/IO/FileSystemWatcher.Linux.cs  4
-rw-r--r--  src/libraries/System.IO.Packaging/src/System/IO/Packaging/ContentType.cs  2
-rw-r--r--  src/libraries/System.Linq.Expressions/src/System/Linq/Expressions/Interpreter/InstructionList.cs  1
-rw-r--r--  src/libraries/System.Linq/System.Linq.sln  24
-rw-r--r--  src/libraries/System.Linq/src/CompatibilitySuppressions.xml  8
-rw-r--r--  src/libraries/System.Linq/src/System.Linq.csproj  7
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/AppendPrepend.SpeedOpt.cs  66
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Cast.SpeedOpt.cs  115
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Cast.cs  47
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Concat.SpeedOpt.cs  18
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Count.cs  12
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/DefaultIfEmpty.SpeedOpt.cs  50
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/DefaultIfEmpty.cs  5
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Distinct.SpeedOpt.cs  10
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/ElementAt.cs  65
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/First.cs  14
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Grouping.SpeedOpt.cs  77
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Grouping.cs  330
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/IIListProvider.cs  33
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/IPartition.cs  47
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Iterator.SpeedOpt.cs  71
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Iterator.cs  30
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Last.cs  14
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Lookup.SpeedOpt.cs  50
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Lookup.cs  66
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/OrderBy.cs  12
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/OrderedEnumerable.SpeedOpt.cs  677
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/OrderedEnumerable.cs  933
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Range.SpeedOpt.cs  20
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Repeat.SpeedOpt.cs  21
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Reverse.SpeedOpt.cs  26
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Select.SpeedOpt.cs  245
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Select.cs  55
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/SelectMany.SpeedOpt.cs  8
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Skip.SpeedOpt.cs  4
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Skip.cs  8
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/SkipTake.SpeedOpt.cs (renamed from src/libraries/System.Linq/src/System/Linq/Partition.SpeedOpt.cs)  155
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Take.SpeedOpt.cs  18
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/ToCollection.cs  12
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Union.SpeedOpt.cs  24
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Where.SpeedOpt.cs  108
-rw-r--r--  src/libraries/System.Linq/src/System/Linq/Where.cs  60
-rw-r--r--  src/libraries/System.Linq/tests/AggregateByTests.cs  22
-rw-r--r--  src/libraries/System.Linq/tests/AppendPrependTests.cs  22
-rw-r--r--  src/libraries/System.Linq/tests/CastTests.cs  97
-rw-r--r--  src/libraries/System.Linq/tests/ChunkTests.cs  6
-rw-r--r--  src/libraries/System.Linq/tests/ConcatTests.cs  49
-rw-r--r--  src/libraries/System.Linq/tests/DefaultIfEmptyTests.cs  19
-rw-r--r--  src/libraries/System.Linq/tests/DistinctTests.cs  6
-rw-r--r--  src/libraries/System.Linq/tests/GroupByTests.cs  25
-rw-r--r--  src/libraries/System.Linq/tests/IndexTests.cs  6
-rw-r--r--  src/libraries/System.Linq/tests/MaxTests.cs  9
-rw-r--r--  src/libraries/System.Linq/tests/MinTests.cs  13
-rw-r--r--  src/libraries/System.Linq/tests/OrderTests.cs  16
-rw-r--r--  src/libraries/System.Linq/tests/RangeTests.cs  3
-rw-r--r--  src/libraries/System.Linq/tests/RepeatTests.cs  3
-rw-r--r--  src/libraries/System.Linq/tests/SequenceEqualTests.cs  29
-rw-r--r--  src/libraries/System.Linq/tests/SkipLastTests.cs  6
-rw-r--r--  src/libraries/System.Linq/tests/SkipWhileTests.cs  9
-rw-r--r--  src/libraries/System.Linq/tests/TakeLastTests.cs  6
-rw-r--r--  src/libraries/System.Linq/tests/TakeTests.cs  33
-rw-r--r--  src/libraries/System.Linq/tests/TakeWhileTests.cs  8
-rw-r--r--  src/libraries/System.Linq/tests/ToLookupTests.cs  65
-rw-r--r--  src/libraries/System.Linq/tests/WhereTests.cs  44
-rw-r--r--  src/libraries/System.Management/src/System/Management/ManagementDateTime.cs  2
-rw-r--r--  src/libraries/System.Net.Http/src/System/Net/Http/BrowserHttpHandler/BrowserHttpHandler.cs  1
-rw-r--r--  src/libraries/System.Net.Primitives/src/System/Net/IPAddress.cs  1
-rw-r--r--  src/libraries/System.Net.Primitives/src/System/Net/IPAddressParser.cs  1
-rw-r--r--  src/libraries/System.Net.Quic/src/System/Net/Quic/Internal/MsQuicApi.NativeMethods.cs  51
-rw-r--r--  src/libraries/System.Net.Quic/src/System/Net/Quic/Internal/MsQuicApi.cs  14
-rw-r--r--  src/libraries/System.Net.Quic/src/System/Net/Quic/QuicConnection.SslConnectionOptions.cs  156
-rw-r--r--  src/libraries/System.Net.Quic/src/System/Net/Quic/QuicConnection.cs  19
-rw-r--r--  src/libraries/System.Net.Quic/src/System/Net/Quic/QuicListener.cs  5
-rw-r--r--  src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicCipherSuitesPolicyTests.cs  1
-rw-r--r--  src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicPlatformDetectionTests.cs  1
-rw-r--r--  src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicRemoteExecutorTests.cs  1
-rw-r--r--  src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicTests.cs  4
-rw-r--r--  src/libraries/System.Net.Quic/tests/FunctionalTests/QuicConnectionTests.cs  4
-rw-r--r--  src/libraries/System.Net.Quic/tests/FunctionalTests/QuicListenerTests.cs  1
-rw-r--r--  src/libraries/System.Net.Quic/tests/FunctionalTests/QuicStreamConnectedStreamConformanceTests.cs  1
-rw-r--r--  src/libraries/System.Net.Quic/tests/FunctionalTests/QuicStreamTests.cs  1
-rw-r--r--  src/libraries/System.Net.Quic/tests/FunctionalTests/QuicTestBase.cs  19
-rw-r--r--  src/libraries/System.Net.Quic/tests/FunctionalTests/QuicTestCollection.cs  11
-rw-r--r--  src/libraries/System.Net.Requests/src/Resources/Strings.resx  3
-rw-r--r--  src/libraries/System.Net.Requests/src/System.Net.Requests.csproj  1
-rw-r--r--  src/libraries/System.Net.Requests/src/System/Net/HttpWebRequest.cs  96
-rw-r--r--  src/libraries/System.Net.Requests/src/System/Net/HttpWebResponse.cs  62
-rw-r--r--  src/libraries/System.Net.Requests/src/System/Net/ServicePoint/ServicePointManager.cs  5
-rw-r--r--  src/libraries/System.Net.Requests/tests/HttpWebRequestTest.cs  124
-rw-r--r--  src/libraries/System.Net.Requests/tests/ServicePointTests/ServicePointManagerTest.cs  4
-rw-r--r--  src/libraries/System.Net.Sockets/src/System/Net/Sockets/Socket.Tasks.cs  12
-rw-r--r--  src/libraries/System.Net.Sockets/src/System/Net/Sockets/Socket.cs  20
-rw-r--r--  src/libraries/System.Net.Sockets/src/System/Net/Sockets/SocketAsyncEventArgs.cs  7
-rw-r--r--  src/libraries/System.Net.Sockets/src/System/Net/Sockets/SocketPal.Unix.cs  4
-rw-r--r--  src/libraries/System.Net.Sockets/tests/FunctionalTests/SendTo.cs  29
-rw-r--r--  src/libraries/System.Net.Sockets/tests/FunctionalTests/SocketAsyncEventArgsTest.cs  47
-rw-r--r--  src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/netcore/TensorPrimitives.LeadingZeroCount.cs  59
-rw-r--r--  src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/netcore/TensorPrimitives.TrailingZeroCount.cs  49
-rw-r--r--  src/libraries/System.Numerics.Tensors/tests/Helpers.cs  55
-rw-r--r--  src/libraries/System.Numerics.Tensors/tests/TensorPrimitives.Generic.cs  176
-rw-r--r--  src/libraries/System.Numerics.Tensors/tests/TensorPrimitives.NonGeneric.Single.cs  5
-rw-r--r--  src/libraries/System.Numerics.Tensors/tests/TensorPrimitivesTests.cs  30
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System.Private.CoreLib.Shared.projitems  4
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/Array.cs  6
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/Buffer.Unix.cs  19
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/Buffer.Windows.cs  16
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/Buffer.cs  222
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/DateTime.cs  1
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/Globalization/GregorianCalendarHelper.cs  1
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/Globalization/TimeSpanFormat.cs  3
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/IO/UnmanagedMemoryStream.cs  8
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/Number.Formatting.cs  2
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/Numerics/Vector_1.cs  1
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/ReadOnlySpan.cs  2
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/Resources/ManifestBasedResourceGroveler.cs  5
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.cs  5
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/MemoryMarshal.cs  2
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/NativeMemory.cs  2
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/SafeBuffer.cs  4
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/SearchValues/IndexOfAnyAsciiSearcher.cs  1
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/SearchValues/ProbabilisticMap.cs  1
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/Security/DynamicSecurityMethodAttribute.cs (renamed from src/coreclr/System.Private.CoreLib/src/System/Security/DynamicSecurityMethodAttribute.cs)  0
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/Span.cs  16
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/SpanHelpers.ByteMemOps.cs  537
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/SpanHelpers.Packed.cs  1
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/SpanHelpers.T.cs  2
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/SpanHelpers.cs  323
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/Threading/Lock.cs  46
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/Threading/WaitHandle.Unix.cs  4
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/Threading/WaitHandle.Windows.cs  11
-rw-r--r--  src/libraries/System.Private.CoreLib/src/System/Threading/WaitHandle.cs  29
-rw-r--r--  src/libraries/System.Private.Xml/src/System/Xml/Schema/XsdDateTime.cs  1
-rw-r--r--  src/libraries/System.Runtime.Numerics/src/System/Number.BigInteger.cs  6
-rw-r--r--  src/libraries/System.Runtime.Numerics/tests/BigInteger/parse.cs  13
-rw-r--r--  src/libraries/System.Runtime/tests/System.Runtime.CompilerServices.Unsafe.Tests/UnsafeTests.cs  27
-rw-r--r--  src/libraries/System.Runtime/tests/System.Runtime.Tests/System/Runtime/CompilerServices/RuntimeHelpersTests.cs  22
-rw-r--r--  src/libraries/System.Runtime/tests/System.Runtime.Tests/System/Type/TypeTests.cs  5
-rw-r--r--  src/libraries/System.Runtime/tests/System.Runtime.Tests/System/ValueTypeTests.cs  30
-rw-r--r--  src/libraries/System.Security.Cryptography/src/Microsoft/Win32/SafeHandles/SafePasswordHandle.cs  5
-rw-r--r--  src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/HashProviderCng.cs  2
-rw-r--r--  src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/X509Certificates/CertificateRequest.cs  2
-rw-r--r--  src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/X509Certificates/X500NameEncoder.cs  1
-rw-r--r--  src/libraries/System.Text.Json/src/System/Text/Json/Document/JsonDocument.Parse.cs  2
-rw-r--r--  src/libraries/System.Text.Json/src/System/Text/Json/Document/JsonElement.cs  5
-rw-r--r--  src/libraries/System.Text.Json/tests/System.Text.Json.SourceGeneration.Unit.Tests/JsonSourceGeneratorDiagnosticsTests.cs  2
-rw-r--r--  src/libraries/System.Text.Json/tests/System.Text.Json.SourceGeneration.Unit.Tests/JsonSourceGeneratorIncrementalTests.cs  2
-rw-r--r--  src/libraries/System.Text.Json/tests/System.Text.Json.SourceGeneration.Unit.Tests/JsonSourceGeneratorTests.cs  2
-rw-r--r--  src/libraries/System.Text.RegularExpressions/gen/RegexGenerator.Emitter.cs  269
-rw-r--r--  src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCaseEquivalences.cs  2
-rw-r--r--  src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCharClass.cs  15
-rw-r--r--  src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCompiler.cs  49
-rw-r--r--  src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexFindOptimizations.cs  39
-rw-r--r--  src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexNode.cs  9
-rw-r--r--  src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexPrefixAnalyzer.cs  310
-rw-r--r--  src/libraries/System.Text.RegularExpressions/tests/FunctionalTests/Regex.KnownPattern.Tests.cs  1
-rw-r--r--  src/libraries/System.Text.RegularExpressions/tests/FunctionalTests/RegexGeneratorOutputTests.cs  49
-rw-r--r--  src/libraries/System.Text.RegularExpressions/tests/UnitTests/RegexPrefixAnalyzerTests.cs  70
-rw-r--r--  src/libraries/System.Threading.ThreadPool/tests/ThreadPoolTests.cs  94
-rw-r--r--  src/libraries/sendtohelixhelp.proj  13
-rw-r--r--  src/libraries/tests.proj  17
-rw-r--r--  src/mono/System.Private.CoreLib/System.Private.CoreLib.csproj  2
-rw-r--r--  src/mono/System.Private.CoreLib/src/System/Buffer.Mono.cs  2
-rw-r--r--  src/mono/System.Private.CoreLib/src/System/Resources/ManifestBasedResourceGroveler.Mono.cs  16
-rw-r--r--  src/mono/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.Mono.cs  11
-rw-r--r--  src/mono/System.Private.CoreLib/src/System/Security/DynamicSecurityMethodAttribute.cs  18
-rw-r--r--  src/mono/System.Private.CoreLib/src/System/String.Mono.cs  2
-rw-r--r--  src/mono/browser/debugger/DebuggerTestSuite/DebuggerTestSuite.csproj  11
-rw-r--r--  src/mono/browser/runtime/debug.ts  7
-rw-r--r--  src/mono/browser/runtime/exports-binding.ts  3
-rw-r--r--  src/mono/browser/runtime/exports-internal.ts  4
-rw-r--r--  src/mono/browser/runtime/exports.ts  14
-rw-r--r--  src/mono/browser/runtime/jiterpreter-support.ts  13
-rw-r--r--  src/mono/browser/runtime/jiterpreter-trace-generator.ts  42
-rw-r--r--  src/mono/browser/runtime/jiterpreter.ts  14
-rw-r--r--  src/mono/browser/runtime/pthreads/index.ts  2
-rw-r--r--  src/mono/browser/runtime/pthreads/ui-thread.ts  4
-rw-r--r--  src/mono/browser/runtime/types/internal.ts  2
-rw-r--r--  src/mono/mono/mini/aot-compiler.c  17
-rw-r--r--  src/mono/mono/mini/exceptions-ppc.c  3
-rw-r--r--  src/mono/mono/mini/interp/interp.c  6
-rw-r--r--  src/mono/mono/mini/interp/jiterpreter-opcode-values.h  3
-rw-r--r--  src/mono/mono/mini/interp/mintops.def  9
-rw-r--r--  src/mono/mono/mini/interp/mintops.h  1
-rw-r--r--  src/mono/mono/mini/interp/transform-opt.c  30
-rw-r--r--  src/mono/mono/mini/interp/transform.c  20
-rw-r--r--  src/mono/mono/utils/mono-threads-wasm.c  8
-rw-r--r--  src/mono/mono/utils/mono-threads-wasm.h  3
-rw-r--r--  src/mono/mono/utils/mono-threads.c  6
-rw-r--r--  src/mono/nuget/Microsoft.NET.Sdk.WebAssembly.Pack/build/Microsoft.NET.Sdk.WebAssembly.Browser.targets  2
-rw-r--r--  src/mono/sample/wasm/blazor-frame/blazor.csproj  4
-rw-r--r--  src/mono/wasm/Wasm.Build.Tests/Blazor/AppsettingsTests.cs  2
-rw-r--r--  src/mono/wasm/Wasm.Build.Tests/Blazor/BlazorRunOptions.cs  7
-rw-r--r--  src/mono/wasm/Wasm.Build.Tests/Blazor/BlazorWasmTestBase.cs  27
-rw-r--r--  src/mono/wasm/Wasm.Build.Tests/Blazor/SimpleMultiThreadedTests.cs  2
-rw-r--r--  src/mono/wasm/Wasm.Build.Tests/Blazor/WorkloadRequiredTests.cs  2
-rw-r--r--  src/mono/wasm/Wasm.Build.Tests/BrowserRunner.cs  26
-rw-r--r--  src/mono/wasm/Wasm.Build.Tests/Common/TestOutputWrapper.cs  7
-rw-r--r--  src/mono/wasm/Wasm.Build.Tests/ProjectProviderBase.cs  4
-rw-r--r--  src/mono/wasm/Wasm.Build.Tests/TestAppScenarios/AppTestBase.cs  98
-rw-r--r--  src/mono/wasm/Wasm.Build.Tests/TestAppScenarios/DebugLevelTests.cs  4
-rw-r--r--  src/mono/wasm/Wasm.Build.Tests/TestAppScenarios/SignalRClientTests.cs  84
-rw-r--r--  src/mono/wasm/Wasm.Build.Tests/TestMainJsProjectProvider.cs  4
-rw-r--r--  src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/App.razor  12
-rw-r--r--  src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/BlazorHosted.Client.csproj  19
-rw-r--r--  src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/Helper.cs  42
-rw-r--r--  src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/Layout/MainLayout.razor  13
-rw-r--r--  src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/Pages/Chat.razor  105
-rw-r--r--  src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/Program.cs  13
-rw-r--r--  src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/_Imports.razor  6
-rw-r--r--  src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/wwwroot/favicon.ico  bin 0 -> 5430 bytes
-rw-r--r--  src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/wwwroot/index.html  22
-rw-r--r--  src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Server/BlazorHosted.Server.csproj  19
-rw-r--r--  src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Server/ChatHub.cs  21
-rw-r--r--  src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Server/Program.cs  52
-rw-r--r--  src/mono/wasm/testassets/WasmBasicTestApp/App/wwwroot/README.md  15
-rw-r--r--  src/mono/wasm/testassets/WasmBasicTestApp/App/wwwroot/main.js  5
-rw-r--r--  src/native/corehost/apphost/static/singlefilehost.def  3
-rw-r--r--  src/native/corehost/json_parser.h  10
-rw-r--r--  src/native/external/rapidjson-version.txt  6
-rw-r--r--  src/native/external/rapidjson/README.TXT  4
-rw-r--r--  src/native/external/rapidjson/allocators.h  521
-rw-r--r--  src/native/external/rapidjson/cursorstreamwrapper.h  78
-rw-r--r--  src/native/external/rapidjson/document.h  573
-rw-r--r--  src/native/external/rapidjson/encodedstream.h  2
-rw-r--r--  src/native/external/rapidjson/encodings.h  2
-rw-r--r--  src/native/external/rapidjson/error/en.h  118
-rw-r--r--  src/native/external/rapidjson/error/error.h  136
-rw-r--r--  src/native/external/rapidjson/filereadstream.h  99
-rw-r--r--  src/native/external/rapidjson/filewritestream.h  104
-rw-r--r--  src/native/external/rapidjson/fwd.h  4
-rw-r--r--  src/native/external/rapidjson/internal/biginteger.h  25
-rw-r--r--  src/native/external/rapidjson/internal/clzll.h  71
-rw-r--r--  src/native/external/rapidjson/internal/diyfp.h  26
-rw-r--r--  src/native/external/rapidjson/internal/dtoa.h  12
-rw-r--r--  src/native/external/rapidjson/internal/ieee754.h  2
-rw-r--r--  src/native/external/rapidjson/internal/itoa.h  2
-rw-r--r--  src/native/external/rapidjson/internal/meta.h  2
-rw-r--r--  src/native/external/rapidjson/internal/pow10.h  2
-rw-r--r--  src/native/external/rapidjson/internal/regex.h  740
-rw-r--r--  src/native/external/rapidjson/internal/stack.h  2
-rw-r--r--  src/native/external/rapidjson/internal/strfunc.h  16
-rw-r--r--  src/native/external/rapidjson/internal/strtod.h  17
-rw-r--r--  src/native/external/rapidjson/internal/swap.h  2
-rw-r--r--  src/native/external/rapidjson/istreamwrapper.h  2
-rw-r--r--  src/native/external/rapidjson/memorybuffer.h  70
-rw-r--r--  src/native/external/rapidjson/memorystream.h  2
-rw-r--r--  src/native/external/rapidjson/msinttypes/inttypes.h  316
-rw-r--r--  src/native/external/rapidjson/msinttypes/stdint.h  300
-rw-r--r--  src/native/external/rapidjson/ostreamwrapper.h  81
-rw-r--r--  src/native/external/rapidjson/pointer.h  1414
-rw-r--r--  src/native/external/rapidjson/prettywriter.h  277
-rw-r--r--  src/native/external/rapidjson/rapidjson.h  123
-rw-r--r--  src/native/external/rapidjson/reader.h  118
-rw-r--r--  src/native/external/rapidjson/schema.h  2497
-rw-r--r--  src/native/external/rapidjson/stream.h  2
-rw-r--r--  src/native/external/rapidjson/stringbuffer.h  2
-rw-r--r--  src/native/external/rapidjson/writer.h  30
-rw-r--r--  src/tasks/WorkloadBuildTasks/PackageInstaller.cs  15
-rw-r--r--  src/tests/Common/testenvironment.proj  4
-rw-r--r--  src/tests/Interop/MarshalAPI/FunctionPointer/GenericFunctionPointer.cs  19
-rw-r--r--  src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.cs  5
-rw-r--r--  src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.csproj  2
-rw-r--r--  src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.swift  4
-rw-r--r--  src/tests/Interop/Swift/SwiftSelfContext/SwiftSelfContext.csproj  2
-rw-r--r--  src/tests/JIT/opt/Structs/MemsetMemcpyNullref.cs  80
-rw-r--r--  src/tests/JIT/opt/Structs/MemsetMemcpyNullref.csproj  10
-rw-r--r--  src/tests/JIT/opt/Vectorization/BufferMemmoveTailCall.il  99
-rw-r--r--  src/tests/JIT/opt/Vectorization/BufferMemmoveTailCall.ilproj  8
-rw-r--r--  src/tests/baseservices/exceptions/unhandled/dependencytodelete.cs  12
-rw-r--r--  src/tests/baseservices/exceptions/unhandled/dependencytodelete.csproj  9
-rw-r--r--  src/tests/baseservices/exceptions/unhandled/unhandledTester.cs  10
-rw-r--r--  src/tests/baseservices/exceptions/unhandled/unhandledTester.csproj  5
-rw-r--r--  src/tests/baseservices/exceptions/unhandled/unhandledmissingdependency.cs  12
-rw-r--r--  src/tests/baseservices/exceptions/unhandled/unhandledmissingdependency.csproj  19
-rw-r--r--  src/tests/issues.targets  5
-rw-r--r--  src/tests/nativeaot/SmokeTests/Preinitialization/Preinitialization.cs  10
-rw-r--r--  src/tests/nativeaot/SmokeTests/TrimmingBehaviors/DeadCodeElimination.cs  5
-rw-r--r--  src/tests/profiler/multiple/multiple.cs  4
-rw-r--r--  src/tools/illink/illink.sln  1
-rw-r--r--  src/tools/illink/src/ILLink.RoslynAnalyzer/DataFlow/FeatureChecksValue.cs  11
-rw-r--r--  src/tools/illink/src/ILLink.RoslynAnalyzer/DataFlow/FeatureChecksVisitor.cs (renamed from src/tools/illink/src/ILLink.RoslynAnalyzer/DataFlow/FeatureCheckVisitor.cs)  60
-rw-r--r--  src/tools/illink/src/ILLink.RoslynAnalyzer/DataFlow/LocalDataFlowVisitor.cs  17
-rw-r--r--  src/tools/illink/src/ILLink.RoslynAnalyzer/DataFlowAnalyzerContext.cs (renamed from src/tools/illink/src/ILLink.RoslynAnalyzer/DataflowAnalyzerContext.cs)  0
-rw-r--r--  src/tools/illink/src/ILLink.RoslynAnalyzer/DynamicallyAccessedMembersAnalyzer.cs  4
-rw-r--r--  src/tools/illink/src/ILLink.RoslynAnalyzer/ILLink.RoslynAnalyzer.csproj  8
-rw-r--r--  src/tools/illink/src/ILLink.RoslynAnalyzer/ISymbolExtensions.cs  35
-rw-r--r--  src/tools/illink/src/ILLink.RoslynAnalyzer/RequiresAnalyzerBase.cs  27
-rw-r--r--  src/tools/illink/src/ILLink.RoslynAnalyzer/RequiresAssemblyFilesAnalyzer.cs  6
-rw-r--r--  src/tools/illink/src/ILLink.RoslynAnalyzer/RequiresDynamicCodeAnalyzer.cs  6
-rw-r--r--  src/tools/illink/src/ILLink.RoslynAnalyzer/RequiresUnreferencedCodeAnalyzer.cs  8
-rw-r--r--  src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/FeatureCheckReturnValuePattern.cs  70
-rw-r--r--  src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisAssignmentPattern.cs  2
-rw-r--r--  src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisGenericInstantiationPattern.cs  2
-rw-r--r--  src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisMethodCallPattern.cs  2
-rw-r--r--  src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisPatternStore.cs  21
-rw-r--r--  src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisReflectionAccessPattern.cs  2
-rw-r--r--  src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisVisitor.cs  29
-rw-r--r--  src/tools/illink/src/ILLink.Shared/DiagnosticId.cs  4
-rw-r--r--  src/tools/illink/src/ILLink.Shared/SharedStrings.resx  14
-rw-r--r--  src/tools/illink/src/linker/CompatibilitySuppressions.xml  8
-rw-r--r--  src/tools/illink/src/linker/Linker.Steps/MarkStep.cs  32
-rw-r--r--  src/tools/illink/src/linker/Linker/Annotations.cs  2
-rw-r--r--  src/tools/illink/src/linker/Linker/InterfaceImplementor.cs  59
-rw-r--r--  src/tools/illink/src/linker/Linker/OverrideInformation.cs  74
-rw-r--r--  src/tools/illink/src/linker/Linker/TypeMapInfo.cs  59
-rw-r--r--  src/tools/illink/test/ILLink.RoslynAnalyzer.Tests/DataFlowTests.cs  6
-rw-r--r--  src/tools/illink/test/ILLink.RoslynAnalyzer.Tests/generated/ILLink.RoslynAnalyzer.Tests.Generator/ILLink.RoslynAnalyzer.Tests.TestCaseGenerator/Inheritance.InterfacesTests.g.cs  6
-rw-r--r--  src/tools/illink/test/Mono.Linker.Tests.Cases.Expectations/Support/FeatureCheckAttribute.cs  17
-rw-r--r--  src/tools/illink/test/Mono.Linker.Tests.Cases.Expectations/Support/FeatureDependsOnAttribute.cs  16
-rw-r--r--  src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/Dependencies/TestFeatures.cs  12
-rw-r--r--  src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/FeatureCheckAttributeDataFlow.cs  615
-rw-r--r--  src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/FeatureCheckAttributeDataFlowTestSubstitutions.xml  52
-rw-r--r--  src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/FeatureCheckDataFlow.cs  20
-rw-r--r--  src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/FeatureCheckDataFlowTestSubstitutions.xml  2
-rw-r--r--  src/tools/illink/test/Mono.Linker.Tests.Cases/Inheritance.Interfaces/Dependencies/InterfaceImplementedThroughBaseInterface.il  48
-rw-r--r--  src/tools/illink/test/Mono.Linker.Tests.Cases/Inheritance.Interfaces/InterfaceImplementedThroughBaseInterface.cs  34
629 files changed, 20622 insertions, 13260 deletions
diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index 5a697ac08819..d76e325e8b6c 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -25,4 +25,5 @@ RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
libssl-dev \
libkrb5-dev \
zlib1g-dev \
- ninja-build
+ ninja-build \
+ tzdata
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index b042cebfc6b9..ecc2cc3d3867 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -21,7 +21,7 @@
/src/mono @marek-safar
-/src/mono/llvm @vargaz @SamMonoRT
+/src/mono/llvm @vargaz @steveisok
/src/mono/mono/arch @vargaz
/src/mono/mono/eglib @vargaz @lambdageek
@@ -36,7 +36,7 @@
/src/mono/mono/eventpipe @lateralusX @lambdageek
-/src/mono/mono/mini @vargaz @lambdageek @SamMonoRT
+/src/mono/mono/mini @vargaz @lambdageek @steveisok
/src/mono/mono/mini/*cfgdump* @vargaz
/src/mono/mono/mini/*exceptions* @vargaz @BrzVlad
/src/mono/mono/mini/*llvm* @vargaz @fanyang-mono
@@ -50,7 +50,7 @@
/src/mono/mono/mini/*simd* @fanyang-mono
/src/mono/mono/profiler @BrzVlad @lambdageek
-/src/mono/mono/sgen @BrzVlad @lambdageek @SamMonoRT
+/src/mono/mono/sgen @BrzVlad @lambdageek
/src/mono/mono/utils @vargaz @lambdageek
/src/mono/mono/utils/*-win* @lateralusX @lambdageek
diff --git a/.github/ISSUE_TEMPLATE/05_blank_issue.md b/.github/ISSUE_TEMPLATE/04_blank_issue.md
index d1429bfd4c1d..d1429bfd4c1d 100644
--- a/.github/ISSUE_TEMPLATE/05_blank_issue.md
+++ b/.github/ISSUE_TEMPLATE/04_blank_issue.md
diff --git a/.github/ISSUE_TEMPLATE/04_ci_known_issue.yml b/.github/ISSUE_TEMPLATE/04_ci_known_issue.yml
deleted file mode 100644
index 17ec4e5e5ec9..000000000000
--- a/.github/ISSUE_TEMPLATE/04_ci_known_issue.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-name: CI Known Issue Report
-description: Create a known issue directly
-labels: ["blocking-clean-ci","Known Build Error"]
-body:
- - type: markdown
- attributes:
- value: |
- Use this template to report issues currently affecting PR stability, be it build or test failures.
- - type: textarea
- id: background
- attributes:
- label: Error Blob
- description: Please identify a clear error string that can help identify future instances of this issue. For more information on how to fill this out, check our issue triage guidelines at [Failure Analysis](/dotnet/runtime/blob/main/docs/workflow/ci/failure-analysis.md#what-to-do-if-you-determine-the-failure-is-unrelated)
- value: |
- ```json
- {
- "ErrorMessage": "",
- "BuildRetry": false,
- "ErrorPattern": "",
- "ExcludeConsoleLog": true
- }
- ```
- validations:
- required: true
- - type: textarea
- id: repro-steps
- attributes:
- label: Reproduction Steps
- description: |
- If possible describe where you observe the issue with links and any other relevant details.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index 54d8c5740bad..b14edd954ede 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -18,3 +18,6 @@ contact_links:
- name: Issue with WPF
url: https://github.com/dotnet/wpf/issues/new/choose
about: Please open issues relating to WPF in dotnet/wpf.
+ - name: CI Known Issue Report
+ url: https://helix.dot.net/BuildAnalysis/CreateKnownIssues
+ about: Use the helper to create a Known Issue in CI if failures in your runs are unrelated to your change. See [Failure Analysis](https://github.com/dotnet/runtime/blob/main/docs/workflow/ci/failure-analysis.md#what-to-do-if-you-determine-the-failure-is-unrelated) for triage instructions.
diff --git a/Directory.Build.props b/Directory.Build.props
index 26e112fab56e..1969e3e16a2a 100644
--- a/Directory.Build.props
+++ b/Directory.Build.props
@@ -313,6 +313,8 @@
'$(OfficialBuildId)' == ''">true</DisableSourceLink>
<!-- Runtime doesn't support Arcade-driven target framework filtering. -->
<NoTargetFrameworkFiltering>true</NoTargetFrameworkFiltering>
+
+ <NativeBuildPartitionPropertiesToRemove>ClrFullNativeBuild;ClrRuntimeSubset;ClrJitSubset;ClrPalTestsSubset;ClrAllJitsSubset;ClrILToolsSubset;ClrNativeAotSubset;ClrSpmiSubset;ClrCrossComponentsSubset;ClrDebugSubset;HostArchitecture;PgoInstrument;NativeOptimizationDataSupported;CMakeArgs</NativeBuildPartitionPropertiesToRemove>
</PropertyGroup>
<!-- RepositoryEngineeringDir isn't set when Installer tests import this file. -->
diff --git a/docs/area-owners.md b/docs/area-owners.md
index 52cb16d8d8d7..6795bebb817f 100644
--- a/docs/area-owners.md
+++ b/docs/area-owners.md
@@ -73,9 +73,9 @@ Note: Editing this file doesn't update the mapping used by `@msftbot` for area-s
| area-System.Composition | @ericstj | @dotnet/area-system-composition | |
| area-System.Configuration | @ericstj | @dotnet/area-system-configuration | |
| area-System.Console | @jeffhandley | @dotnet/area-system-console | |
-| area-System.Data | @ajcvickers | @ajcvickers @davoudeshtehari @david-engel @roji | <ul><li>Odbc, OleDb - @saurabh500</li></ul> |
-| area-System.Data.Odbc | @ajcvickers | @ajcvickers @roji | |
-| area-System.Data.OleDB | @ajcvickers | @ajcvickers @roji | |
+| area-System.Data | @sammonort | @ajcvickers @davoudeshtehari @david-engel @roji | <ul><li>Odbc, OleDb - @saurabh500</li></ul> |
+| area-System.Data.Odbc | @sammonort | @ajcvickers @roji | |
+| area-System.Data.OleDB | @sammonort | @ajcvickers @roji | |
| area-System.Data.SqlClient | @David-Engel | @davoudeshtehari @david-engel @jrahnama | Archived component - limited churn/contributions (see https://devblogs.microsoft.com/dotnet/introducing-the-new-microsoftdatasqlclient/) |
| area-System.DateTime | @ericstj | @dotnet/area-system-datetime | System namespace APIs related to dates and times, including DateOnly, DateTime, DateTimeKind, DateTimeOffset, DayOfWeek, TimeOnly, TimeSpan, TimeZone, and TimeZoneInfo |
| area-System.Diagnostics | @tommcdon | @dotnet/area-system-diagnostics | |
@@ -135,7 +135,7 @@ Note: Editing this file doesn't update the mapping used by `@msftbot` for area-s
| area-System.Threading.Channels | @ericstj | @dotnet/area-system-threading-channels | Consultants: @stephentoub |
| area-System.Threading.RateLimiting | @rafikiassumani-msft | @BrennanConroy @halter73 | |
| area-System.Threading.Tasks | @ericstj | @dotnet/area-system-threading-tasks | Consultants: @stephentoub |
-| area-System.Transactions | @ajcvickers | @roji | |
+| area-System.Transactions | @sammonort | @roji | |
| area-System.Xml | @jeffhandley | @dotnet/area-system-xml | |
| area-TieredCompilation-coreclr | @mangod9 | @kouvel | |
| area-Tools-ILLink | @agocke | @dotnet/illink | |
diff --git a/docs/design/coreclr/botr/guide-for-porting.md b/docs/design/coreclr/botr/guide-for-porting.md
index 5d2c01aa52d0..f7ca105bf165 100644
--- a/docs/design/coreclr/botr/guide-for-porting.md
+++ b/docs/design/coreclr/botr/guide-for-porting.md
@@ -413,12 +413,6 @@ Here is an annotated list of the stubs implemented for Unix on Arm64.
Today use of this feature on Unix requires hand-written IL. On Windows
this feature is commonly used by C++/CLI
-3. EH Correctness. Some helpers are written in assembly to provide well known
- locations for NullReferenceExceptions to be generated out of a SIGSEGV
- signal.
-
- 1. `JIT_MemSet`, and `JIT_MemCpy` have this requirement
-
#### cgencpu.h
This header is included by various code in the VM directory. It provides a large
diff --git a/docs/design/coreclr/jit/first-class-structs.md b/docs/design/coreclr/jit/first-class-structs.md
index dc017aee75f2..4211f75ff745 100644
--- a/docs/design/coreclr/jit/first-class-structs.md
+++ b/docs/design/coreclr/jit/first-class-structs.md
@@ -94,10 +94,6 @@ encountered by most phases of the JIT:
[#21705](https://github.com/dotnet/coreclr/pull/21705) they are no longer large nodes.
* `GT_STORE_OBJ` and `GT_STORE_BLK` have the same structure as `GT_OBJ` and `GT_BLK`, respectively
* `Data()` is op2
- * `GT_STORE_DYN_BLK` (GenTreeStoreDynBlk extends GenTreeBlk)
- * Additional child `gtDynamicSize`
- * Note that these aren't really struct stores; they represent dynamically sized blocks
- of arbitrary data.
* For `GT_LCL_FLD` nodes, we store a pointer to `ClassLayout` in the node.
* For `GT_LCL_VAR` nodes, the `ClassLayout` is obtained from the `LclVarDsc`.
diff --git a/docs/design/coreclr/jit/ryujit-overview.md b/docs/design/coreclr/jit/ryujit-overview.md
index cdb17002ee19..5e63d38e98f6 100644
--- a/docs/design/coreclr/jit/ryujit-overview.md
+++ b/docs/design/coreclr/jit/ryujit-overview.md
@@ -222,6 +222,7 @@ The top-level function of interest is `Compiler::compCompile`. It invokes the fo
| [Common Subexpression Elimination (CSE)](#cse) | Elimination of redundant subexpressions based on value numbers. |
| [Assertion Propagation](#assertion-propagation) | Utilizes value numbers to propagate and transform based on properties such as non-nullness. |
| [Range analysis](#range-analysis) | Eliminate array index range checks based on value numbers and assertions |
+| [Induction variable optimization](#iv-opts) | Optimize induction variables used inside natural loops based on scalar evolution analysis |
| [VN-based dead store elimination](#vn-based-dead-store-elimination) | Eliminate stores that do not change the value of a local. |
| [If conversion](#if-conversion) | Transform conditional definitions into `GT_SELECT` operators. |
| [Rationalization](#rationalization) | Flowgraph order changes from `FGOrderTree` to `FGOrderLinear`. All `GT_COMMA` nodes are transformed. |
@@ -347,6 +348,11 @@ reused.
Utilizes value numbers to propagate and transform based on properties such as non-nullness.
+### <a name="iv-opts"></a>Induction variable optimization
+
+Performs scalar evolution analysis and utilizes it to optimize induction variables inside loops.
+Currently this entails IV widening which is done on x64 only.
+
### <a name="range-analysis"></a>Range analysis
Optimize array index range checks based on value numbers and assertions.
diff --git a/docs/design/coreclr/jit/ryujit-tutorial.md b/docs/design/coreclr/jit/ryujit-tutorial.md
index 34466e45afbc..ec900ccc8cd9 100644
--- a/docs/design/coreclr/jit/ryujit-tutorial.md
+++ b/docs/design/coreclr/jit/ryujit-tutorial.md
@@ -447,6 +447,10 @@ This is the same diagram as before, but with additional links to indicate execut
- Determine initial value for dependent phis
- Eliminate checks where the range of the index is within the check range
+### Induction Variable Optimization
+- Perform scalar evolution analysis to describe values of IR nodes inside loops
+- Perform IV widening on x64 to avoid unnecessary zero extensions for array/span indexing
+
## RyuJIT Back-End
### Rationalization
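
The IV-widening bullets added in the two docs above are terse, so here is a minimal C# sketch (an editorial illustration, not code from this commit) of the kind of loop the new phase targets: a 32-bit counter indexing an array on x64, where every element access needs a 64-bit address computation.

```csharp
// Illustrative only: a loop whose induction variable the JIT can widen.
static class IvWideningExample
{
    static int Sum(int[] arr)
    {
        int sum = 0;
        // 'i' is 32-bit, but the address of arr[i] is 64-bit on x64; without
        // widening, the JIT must zero-extend 'i' on every iteration. Widening
        // promotes 'i' to a 64-bit IV and removes that per-iteration extension.
        for (int i = 0; i < arr.Length; i++)
        {
            sum += arr[i];
        }
        return sum;
    }
}
```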
diff --git a/docs/workflow/ci/failure-analysis.md b/docs/workflow/ci/failure-analysis.md
index 57917c841316..58a11c06bdfa 100644
--- a/docs/workflow/ci/failure-analysis.md
+++ b/docs/workflow/ci/failure-analysis.md
@@ -12,6 +12,19 @@
## Triaging errors seen in CI
+## Summary
+
+**Passing Build Analysis is required to merge into the runtime repo**.
+
+To resolve failures, do the following, in order:
+
+1. Fix the problem if your PR is the cause.
+2. For all failures not in the "Known test errors" section, [try to file a Known Build Error issue](#what-to-do-if-you-determine-the-failure-is-unrelated).
+3. If all else fails, perform a [manual bypass](#bypassing-build-analysis).
+
+
+## Details
+
In case of failure, any PR on the runtime will have a failed GitHub check - PR Build Analysis - which has a summary of all failures, including a list of matching known issues as well as any regressions introduced to the build or the tests. This tab should be your first stop for analyzing the PR failures.
![Build analysis check](analysis-check.png)
@@ -78,6 +91,7 @@ If you have considered all the diagnostic artifacts and determined the failure i
````
It already contains most of the essential information, but *it is very important that you fill out the json blob*.
+ - You can now use the [Build Analysis Known Issue Helper](https://helix.dot.net/BuildAnalysis/CreateKnownIssues) to create an issue. It assists in adding the right set of labels, fills in the necessary paths in the json blob, and validates that the blob matches the text found in the logs for the issue.
- You can add into the `ErrorMessage` field the string that you found uniquely identifies the issue. In case you need to use a regex, use the `ErrorPattern` field instead. This is limited to a single-line, non-backtracking regex as described [here](https://github.com/dotnet/arcade/blob/main/Documentation/Projects/Build%20Analysis/KnownIssues.md#regex-matching). This regex also needs to be appropriately escaped. Check the [arcade known issues](https://github.com/dotnet/arcade/blob/main/Documentation/Projects/Build%20Analysis/KnownIssues.md#filling-out-known-issues-json-blob) documentation for a good guide on proper regex and JSON escaping.
- The field `ExcludeConsoleLog` describes if the execution logs should be considered on top of the individual test results. **For most cases, this should be set to `true` as the failure will happen within a single test**. Setting it to `false` will mean all failures within an xUnit set of tests will also get attributed to this particular error, since there's one log describing all the problems. Due to limitations in Known Issues around rate limiting and xUnit resiliency, setting `ExcludeConsoleLog=false` is necessary in two scenarios:
+ Nested tests as reported to Azure DevOps. Essentially this means theory failures, which look like this: ![xUnit theory seen in azure devops](theory-azdo.png).
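+
+For illustration, a hypothetical filled-out blob could look like the following. The field names come from the issue template; the values here are placeholders:
+
+```json
+{
+    "ErrorMessage": "a string that uniquely identifies the failure in the logs",
+    "ErrorPattern": "",
+    "ExcludeConsoleLog": true
+}
+```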
@@ -95,6 +109,16 @@ After you do this, if the failure is occurring frequently as per the data captur
There are plenty of intermittent failures that won't manifest again on a retry. Therefore these steps should be followed for every iteration of the PR build, e.g. before retrying/rebuilding.
+### Bypassing build analysis
+
+To unconditionally bypass the build analysis check (turn it green), you can add a comment to your PR with the following text:
+
+```
+/ba-g <reason>
+```
+
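+For example, a (hypothetical) comment of `/ba-g the remaining failure matches the known issue filed above` would turn the check green.
+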
+For more information, see https://github.com/dotnet/arcade/blob/main/Documentation/Projects/Build%20Analysis/EscapeMechanismforBuildAnalysis.md
+
### Examples of Build Analysis
#### Good usage examples
diff --git a/eng/DotNetBuild.props b/eng/DotNetBuild.props
index 53d03c7f4dd1..a6350c7fea93 100644
--- a/eng/DotNetBuild.props
+++ b/eng/DotNetBuild.props
@@ -21,6 +21,10 @@
<_hostArch>$(_hostRid.Substring($(_hostRidPlatformIndex)).TrimStart('-'))</_hostArch>
<LogVerbosity Condition="'$(LogVerbosity)' == ''">minimal</LogVerbosity>
+
+ <!-- When using the inner clone functionality in the repo build, copy the sources
+ directly as source-link doesn't support local path clones anymore. -->
+ <CopySrcInsteadOfClone>true</CopySrcInsteadOfClone>
</PropertyGroup>
<PropertyGroup Label="ShortStacks">
diff --git a/eng/Subsets.props b/eng/Subsets.props
index dd284ea6d997..29d7467e6b43 100644
--- a/eng/Subsets.props
+++ b/eng/Subsets.props
@@ -255,7 +255,7 @@
</PropertyGroup>
<ItemGroup Condition="'$(ClrRuntimeBuildSubsets)' != '' or $(_subset.Contains('+clr.nativeprereqs+'))">
- <ProjectToBuild Include="$(CoreClrProjectRoot)runtime-prereqs.proj" Category="clr" />
+ <ProjectToBuild Include="$(CoreClrProjectRoot)runtime-prereqs.proj" Category="clr" GlobalPropertiesToRemove="$(NativeBuildPartitionPropertiesToRemove)" />
</ItemGroup>
<ItemGroup Condition="'$(ClrRuntimeBuildSubsets)' != ''">
diff --git a/eng/Version.Details.xml b/eng/Version.Details.xml
index 3ee91b259e46..822b757842b7 100644
--- a/eng/Version.Details.xml
+++ b/eng/Version.Details.xml
@@ -1,8 +1,8 @@
<Dependencies>
<ProductDependencies>
- <Dependency Name="Microsoft.NETCore.Runtime.ICU.Transport" Version="9.0.0-preview.2.24119.1">
+ <Dependency Name="Microsoft.NETCore.Runtime.ICU.Transport" Version="9.0.0-preview.3.24123.1">
<Uri>https://github.com/dotnet/icu</Uri>
- <Sha>c15a038f408fef6814e5f9c0bf8882bcdf53a290</Sha>
+ <Sha>9c4c9995bc756a01597b5efb0e452ef879a76d99</Sha>
</Dependency>
<Dependency Name="System.Net.MsQuic.Transport" Version="9.0.0-alpha.1.24067.1">
<Uri>https://github.com/dotnet/msquic</Uri>
@@ -12,9 +12,9 @@
<Uri>https://github.com/dotnet/wcf</Uri>
<Sha>7f504aabb1988e9a093c1e74d8040bd52feb2f01</Sha>
</Dependency>
- <Dependency Name="Microsoft.NET.Runtime.Emscripten.3.1.34.Python.win-x64" Version="9.0.0-preview.2.24121.1">
+ <Dependency Name="Microsoft.NET.Runtime.Emscripten.3.1.34.Python.win-x64" Version="9.0.0-preview.3.24126.1">
<Uri>https://github.com/dotnet/emsdk</Uri>
- <Sha>2d3f1fe4807a21879cedba9d3fde8cd329fb17f2</Sha>
+ <Sha>0f3e462442af5fe65271e3185d5b645ad40a6041</Sha>
</Dependency>
<Dependency Name="runtime.linux-arm64.Microsoft.NETCore.Runtime.ObjWriter" Version="16.0.5-alpha.1.24112.1" CoherentParentDependency="Microsoft.NET.Workload.Emscripten.Current.Manifest-9.0.100.Transport">
<Uri>https://github.com/dotnet/llvm-project</Uri>
@@ -90,121 +90,121 @@
<Sha>a045dd54a4c44723c215d992288160eb1401bb7f</Sha>
<SourceBuild RepoName="command-line-api" ManagedOnly="true" />
</Dependency>
- <Dependency Name="Microsoft.DotNet.Cecil" Version="0.11.4-alpha.24119.1">
+ <Dependency Name="Microsoft.DotNet.Cecil" Version="0.11.4-alpha.24120.1">
<Uri>https://github.com/dotnet/cecil</Uri>
- <Sha>61250b0ed403b3f9b69a33f7d8f66f311338d6a1</Sha>
+ <Sha>0d0bc8e0f47fdae9834e1eac678f364c50946133</Sha>
</Dependency>
<!-- Intermediate is necessary for source build. -->
- <Dependency Name="Microsoft.SourceBuild.Intermediate.cecil" Version="0.11.4-alpha.24119.1">
+ <Dependency Name="Microsoft.SourceBuild.Intermediate.cecil" Version="0.11.4-alpha.24120.1">
<Uri>https://github.com/dotnet/cecil</Uri>
- <Sha>61250b0ed403b3f9b69a33f7d8f66f311338d6a1</Sha>
+ <Sha>0d0bc8e0f47fdae9834e1eac678f364c50946133</Sha>
<SourceBuild RepoName="cecil" ManagedOnly="true" />
</Dependency>
- <Dependency Name="Microsoft.NET.Workload.Emscripten.Current.Manifest-9.0.100.Transport" Version="9.0.0-preview.2.24121.1">
+ <Dependency Name="Microsoft.NET.Workload.Emscripten.Current.Manifest-9.0.100.Transport" Version="9.0.0-preview.3.24126.1">
<Uri>https://github.com/dotnet/emsdk</Uri>
- <Sha>2d3f1fe4807a21879cedba9d3fde8cd329fb17f2</Sha>
+ <Sha>0f3e462442af5fe65271e3185d5b645ad40a6041</Sha>
</Dependency>
<!-- Intermediate is necessary for source build. -->
- <Dependency Name="Microsoft.SourceBuild.Intermediate.emsdk" Version="9.0.0-preview.2.24121.1">
+ <Dependency Name="Microsoft.SourceBuild.Intermediate.emsdk" Version="9.0.0-preview.3.24126.1">
<Uri>https://github.com/dotnet/emsdk</Uri>
- <Sha>2d3f1fe4807a21879cedba9d3fde8cd329fb17f2</Sha>
+ <Sha>0f3e462442af5fe65271e3185d5b645ad40a6041</Sha>
<SourceBuild RepoName="emsdk" ManagedOnly="true" />
</Dependency>
<!-- Intermediate is necessary for source build. -->
- <Dependency Name="Microsoft.SourceBuild.Intermediate.source-build-reference-packages" Version="9.0.0-alpha.1.24113.1">
+ <Dependency Name="Microsoft.SourceBuild.Intermediate.source-build-reference-packages" Version="9.0.0-alpha.1.24123.3">
<Uri>https://github.com/dotnet/source-build-reference-packages</Uri>
- <Sha>2f79f97b7a6a0cf2ca3297a8fa526e6f4ea98ce2</Sha>
+ <Sha>62fb9a85e5c4af657b0014fd6d6588c139d0bb4f</Sha>
<SourceBuild RepoName="source-build-reference-packages" ManagedOnly="true" />
</Dependency>
<!-- Intermediate is necessary for source build. -->
- <Dependency Name="Microsoft.SourceBuild.Intermediate.source-build-externals" Version="9.0.0-alpha.1.24112.1">
+ <Dependency Name="Microsoft.SourceBuild.Intermediate.source-build-externals" Version="9.0.0-alpha.1.24127.2">
<Uri>https://github.com/dotnet/source-build-externals</Uri>
- <Sha>ddfb60463c966af55fd0e222c2266170e83d1324</Sha>
+ <Sha>88f13afba58a6c455039d71bbdd2cff3d847b236</Sha>
<SourceBuild RepoName="source-build-externals" ManagedOnly="true" />
</Dependency>
</ProductDependencies>
<ToolsetDependencies>
- <Dependency Name="Microsoft.DotNet.Arcade.Sdk" Version="9.0.0-beta.24112.1">
+ <Dependency Name="Microsoft.DotNet.Arcade.Sdk" Version="9.0.0-beta.24151.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>c3f5cbfb2829795294f5c2d9fa5a0522f47e91fb</Sha>
+ <Sha>d4a7b37f268cb5fe89f4472ef5341629cd040dfe</Sha>
</Dependency>
<!-- Intermediate is necessary for source build. -->
- <Dependency Name="Microsoft.SourceBuild.Intermediate.arcade" Version="9.0.0-beta.24112.1">
+ <Dependency Name="Microsoft.SourceBuild.Intermediate.arcade" Version="9.0.0-beta.24151.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>c3f5cbfb2829795294f5c2d9fa5a0522f47e91fb</Sha>
+ <Sha>d4a7b37f268cb5fe89f4472ef5341629cd040dfe</Sha>
<SourceBuild RepoName="arcade" ManagedOnly="true" />
</Dependency>
- <Dependency Name="Microsoft.DotNet.XliffTasks" Version="9.0.0-beta.24112.1">
+ <Dependency Name="Microsoft.DotNet.XliffTasks" Version="9.0.0-beta.24151.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>c3f5cbfb2829795294f5c2d9fa5a0522f47e91fb</Sha>
+ <Sha>d4a7b37f268cb5fe89f4472ef5341629cd040dfe</Sha>
</Dependency>
- <Dependency Name="Microsoft.DotNet.Helix.Sdk" Version="9.0.0-beta.24112.1">
+ <Dependency Name="Microsoft.DotNet.Helix.Sdk" Version="9.0.0-beta.24151.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>c3f5cbfb2829795294f5c2d9fa5a0522f47e91fb</Sha>
+ <Sha>d4a7b37f268cb5fe89f4472ef5341629cd040dfe</Sha>
</Dependency>
- <Dependency Name="Microsoft.DotNet.GenAPI" Version="9.0.0-beta.24112.1">
+ <Dependency Name="Microsoft.DotNet.GenAPI" Version="9.0.0-beta.24151.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>c3f5cbfb2829795294f5c2d9fa5a0522f47e91fb</Sha>
+ <Sha>d4a7b37f268cb5fe89f4472ef5341629cd040dfe</Sha>
</Dependency>
- <Dependency Name="Microsoft.DotNet.GenFacades" Version="9.0.0-beta.24112.1">
+ <Dependency Name="Microsoft.DotNet.GenFacades" Version="9.0.0-beta.24151.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>c3f5cbfb2829795294f5c2d9fa5a0522f47e91fb</Sha>
+ <Sha>d4a7b37f268cb5fe89f4472ef5341629cd040dfe</Sha>
</Dependency>
- <Dependency Name="Microsoft.DotNet.XUnitAssert" Version="2.6.7-beta.24112.1">
+ <Dependency Name="Microsoft.DotNet.XUnitAssert" Version="2.6.7-beta.24151.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>c3f5cbfb2829795294f5c2d9fa5a0522f47e91fb</Sha>
+ <Sha>d4a7b37f268cb5fe89f4472ef5341629cd040dfe</Sha>
</Dependency>
- <Dependency Name="Microsoft.DotNet.XUnitExtensions" Version="9.0.0-beta.24112.1">
+ <Dependency Name="Microsoft.DotNet.XUnitExtensions" Version="9.0.0-beta.24151.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>c3f5cbfb2829795294f5c2d9fa5a0522f47e91fb</Sha>
+ <Sha>d4a7b37f268cb5fe89f4472ef5341629cd040dfe</Sha>
</Dependency>
- <Dependency Name="Microsoft.DotNet.XUnitConsoleRunner" Version="2.6.7-beta.24112.1">
+ <Dependency Name="Microsoft.DotNet.XUnitConsoleRunner" Version="2.6.7-beta.24151.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>c3f5cbfb2829795294f5c2d9fa5a0522f47e91fb</Sha>
+ <Sha>d4a7b37f268cb5fe89f4472ef5341629cd040dfe</Sha>
</Dependency>
- <Dependency Name="Microsoft.DotNet.Build.Tasks.Archives" Version="9.0.0-beta.24112.1">
+ <Dependency Name="Microsoft.DotNet.Build.Tasks.Archives" Version="9.0.0-beta.24151.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>c3f5cbfb2829795294f5c2d9fa5a0522f47e91fb</Sha>
+ <Sha>d4a7b37f268cb5fe89f4472ef5341629cd040dfe</Sha>
</Dependency>
- <Dependency Name="Microsoft.DotNet.Build.Tasks.Packaging" Version="9.0.0-beta.24112.1">
+ <Dependency Name="Microsoft.DotNet.Build.Tasks.Packaging" Version="9.0.0-beta.24151.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>c3f5cbfb2829795294f5c2d9fa5a0522f47e91fb</Sha>
+ <Sha>d4a7b37f268cb5fe89f4472ef5341629cd040dfe</Sha>
</Dependency>
- <Dependency Name="Microsoft.DotNet.Build.Tasks.Installers" Version="9.0.0-beta.24112.1">
+ <Dependency Name="Microsoft.DotNet.Build.Tasks.Installers" Version="9.0.0-beta.24151.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>c3f5cbfb2829795294f5c2d9fa5a0522f47e91fb</Sha>
+ <Sha>d4a7b37f268cb5fe89f4472ef5341629cd040dfe</Sha>
</Dependency>
- <Dependency Name="Microsoft.DotNet.Build.Tasks.Templating" Version="9.0.0-beta.24112.1">
+ <Dependency Name="Microsoft.DotNet.Build.Tasks.Templating" Version="9.0.0-beta.24151.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>c3f5cbfb2829795294f5c2d9fa5a0522f47e91fb</Sha>
+ <Sha>d4a7b37f268cb5fe89f4472ef5341629cd040dfe</Sha>
</Dependency>
- <Dependency Name="Microsoft.DotNet.Build.Tasks.Workloads" Version="9.0.0-beta.24112.1">
+ <Dependency Name="Microsoft.DotNet.Build.Tasks.Workloads" Version="9.0.0-beta.24151.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>c3f5cbfb2829795294f5c2d9fa5a0522f47e91fb</Sha>
+ <Sha>d4a7b37f268cb5fe89f4472ef5341629cd040dfe</Sha>
</Dependency>
- <Dependency Name="Microsoft.DotNet.CodeAnalysis" Version="9.0.0-beta.24112.1">
+ <Dependency Name="Microsoft.DotNet.CodeAnalysis" Version="9.0.0-beta.24151.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>c3f5cbfb2829795294f5c2d9fa5a0522f47e91fb</Sha>
+ <Sha>d4a7b37f268cb5fe89f4472ef5341629cd040dfe</Sha>
</Dependency>
- <Dependency Name="Microsoft.DotNet.Build.Tasks.TargetFramework" Version="9.0.0-beta.24112.1">
+ <Dependency Name="Microsoft.DotNet.Build.Tasks.TargetFramework" Version="9.0.0-beta.24151.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>c3f5cbfb2829795294f5c2d9fa5a0522f47e91fb</Sha>
+ <Sha>d4a7b37f268cb5fe89f4472ef5341629cd040dfe</Sha>
</Dependency>
- <Dependency Name="Microsoft.DotNet.RemoteExecutor" Version="9.0.0-beta.24112.1">
+ <Dependency Name="Microsoft.DotNet.RemoteExecutor" Version="9.0.0-beta.24151.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>c3f5cbfb2829795294f5c2d9fa5a0522f47e91fb</Sha>
+ <Sha>d4a7b37f268cb5fe89f4472ef5341629cd040dfe</Sha>
</Dependency>
- <Dependency Name="Microsoft.DotNet.Build.Tasks.Feed" Version="9.0.0-beta.24112.1">
+ <Dependency Name="Microsoft.DotNet.Build.Tasks.Feed" Version="9.0.0-beta.24151.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>c3f5cbfb2829795294f5c2d9fa5a0522f47e91fb</Sha>
+ <Sha>d4a7b37f268cb5fe89f4472ef5341629cd040dfe</Sha>
</Dependency>
- <Dependency Name="Microsoft.DotNet.VersionTools.Tasks" Version="9.0.0-beta.24112.1">
+ <Dependency Name="Microsoft.DotNet.VersionTools.Tasks" Version="9.0.0-beta.24151.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>c3f5cbfb2829795294f5c2d9fa5a0522f47e91fb</Sha>
+ <Sha>d4a7b37f268cb5fe89f4472ef5341629cd040dfe</Sha>
</Dependency>
- <Dependency Name="Microsoft.DotNet.SharedFramework.Sdk" Version="9.0.0-beta.24112.1">
+ <Dependency Name="Microsoft.DotNet.SharedFramework.Sdk" Version="9.0.0-beta.24151.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>c3f5cbfb2829795294f5c2d9fa5a0522f47e91fb</Sha>
+ <Sha>d4a7b37f268cb5fe89f4472ef5341629cd040dfe</Sha>
</Dependency>
<Dependency Name="System.ComponentModel.TypeConverter.TestData" Version="9.0.0-beta.24119.1">
<Uri>https://github.com/dotnet/runtime-assets</Uri>
@@ -314,43 +314,43 @@
<Uri>https://github.com/dotnet/llvm-project</Uri>
<Sha>9885e5aecc176ca701fc3527877d608bf7ccfb7d</Sha>
</Dependency>
- <Dependency Name="Microsoft.NETCore.App.Runtime.win-x64" Version="9.0.0-preview.2.24116.2">
+ <Dependency Name="Microsoft.NETCore.App.Runtime.win-x64" Version="9.0.0-preview.3.24126.1">
<Uri>https://github.com/dotnet/runtime</Uri>
- <Sha>d972a19c077e899d0b3fff97d955968e50906396</Sha>
+ <Sha>c55c4d50793c878cc73ae6ca3335f2b6b3ccc8a4</Sha>
</Dependency>
- <Dependency Name="runtime.native.System.IO.Ports" Version="9.0.0-preview.2.24116.2">
+ <Dependency Name="runtime.native.System.IO.Ports" Version="9.0.0-preview.3.24126.1">
<Uri>https://github.com/dotnet/runtime</Uri>
- <Sha>d972a19c077e899d0b3fff97d955968e50906396</Sha>
+ <Sha>c55c4d50793c878cc73ae6ca3335f2b6b3ccc8a4</Sha>
</Dependency>
- <Dependency Name="Microsoft.NETCore.ILAsm" Version="9.0.0-preview.2.24116.2">
+ <Dependency Name="Microsoft.NETCore.ILAsm" Version="9.0.0-preview.3.24126.1">
<Uri>https://github.com/dotnet/runtime</Uri>
- <Sha>d972a19c077e899d0b3fff97d955968e50906396</Sha>
+ <Sha>c55c4d50793c878cc73ae6ca3335f2b6b3ccc8a4</Sha>
</Dependency>
- <Dependency Name="Microsoft.NET.Sdk.IL" Version="9.0.0-preview.2.24116.2">
+ <Dependency Name="Microsoft.NET.Sdk.IL" Version="9.0.0-preview.3.24126.1">
<Uri>https://github.com/dotnet/runtime</Uri>
- <Sha>d972a19c077e899d0b3fff97d955968e50906396</Sha>
+ <Sha>c55c4d50793c878cc73ae6ca3335f2b6b3ccc8a4</Sha>
</Dependency>
- <Dependency Name="System.Text.Json" Version="9.0.0-preview.2.24116.2">
+ <Dependency Name="System.Text.Json" Version="9.0.0-preview.3.24126.1">
<Uri>https://github.com/dotnet/runtime</Uri>
- <Sha>d972a19c077e899d0b3fff97d955968e50906396</Sha>
+ <Sha>c55c4d50793c878cc73ae6ca3335f2b6b3ccc8a4</Sha>
</Dependency>
<!-- Intermediate is necessary for source build. -->
- <Dependency Name="Microsoft.SourceBuild.Intermediate.runtime.linux-x64" Version="9.0.0-preview.2.24116.2">
+ <Dependency Name="Microsoft.SourceBuild.Intermediate.runtime.linux-x64" Version="9.0.0-preview.3.24126.1">
<Uri>https://github.com/dotnet/runtime</Uri>
- <Sha>d972a19c077e899d0b3fff97d955968e50906396</Sha>
+ <Sha>c55c4d50793c878cc73ae6ca3335f2b6b3ccc8a4</Sha>
<SourceBuild RepoName="runtime" ManagedOnly="false" />
</Dependency>
- <Dependency Name="Microsoft.DotNet.ILCompiler" Version="9.0.0-preview.2.24116.2">
+ <Dependency Name="Microsoft.DotNet.ILCompiler" Version="9.0.0-preview.3.24126.1">
<Uri>https://github.com/dotnet/runtime</Uri>
- <Sha>d972a19c077e899d0b3fff97d955968e50906396</Sha>
+ <Sha>c55c4d50793c878cc73ae6ca3335f2b6b3ccc8a4</Sha>
</Dependency>
- <Dependency Name="System.Reflection.Metadata" Version="9.0.0-preview.2.24116.2">
+ <Dependency Name="System.Reflection.Metadata" Version="9.0.0-preview.3.24126.1">
<Uri>https://github.com/dotnet/runtime</Uri>
- <Sha>d972a19c077e899d0b3fff97d955968e50906396</Sha>
+ <Sha>c55c4d50793c878cc73ae6ca3335f2b6b3ccc8a4</Sha>
</Dependency>
- <Dependency Name="System.Reflection.MetadataLoadContext" Version="9.0.0-preview.2.24116.2">
+ <Dependency Name="System.Reflection.MetadataLoadContext" Version="9.0.0-preview.3.24126.1">
<Uri>https://github.com/dotnet/runtime</Uri>
- <Sha>d972a19c077e899d0b3fff97d955968e50906396</Sha>
+ <Sha>c55c4d50793c878cc73ae6ca3335f2b6b3ccc8a4</Sha>
</Dependency>
<Dependency Name="Microsoft.DotNet.XHarness.TestRunners.Common" Version="9.0.0-prerelease.24119.1">
<Uri>https://github.com/dotnet/xharness</Uri>
@@ -364,9 +364,9 @@
<Uri>https://github.com/dotnet/xharness</Uri>
<Sha>8aa2a4cb80000ebb46ee61cd6ac29b2e63ebe87c</Sha>
</Dependency>
- <Dependency Name="Microsoft.DotNet.PackageTesting" Version="9.0.0-beta.24112.1">
+ <Dependency Name="Microsoft.DotNet.PackageTesting" Version="9.0.0-beta.24151.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>c3f5cbfb2829795294f5c2d9fa5a0522f47e91fb</Sha>
+ <Sha>d4a7b37f268cb5fe89f4472ef5341629cd040dfe</Sha>
</Dependency>
<Dependency Name="optimization.windows_nt-x64.MIBC.Runtime" Version="1.0.0-prerelease.24104.2">
<Uri>https://dev.azure.com/dnceng/internal/_git/dotnet-optimization</Uri>
@@ -384,9 +384,9 @@
<Uri>https://dev.azure.com/dnceng/internal/_git/dotnet-optimization</Uri>
<Sha>db9f1c2362565f3ef41c8e8feb5ed49ab11a6459</Sha>
</Dependency>
- <Dependency Name="Microsoft.DotNet.HotReload.Utils.Generator.BuildTool" Version="9.0.0-alpha.0.24119.1">
+ <Dependency Name="Microsoft.DotNet.HotReload.Utils.Generator.BuildTool" Version="9.0.0-alpha.0.24120.1">
<Uri>https://github.com/dotnet/hotreload-utils</Uri>
- <Sha>ec73ebf54c4ae98ac1450fcf95998180d4160f31</Sha>
+ <Sha>81cdd6568c7360cf337b1ab6a2dcf2ce84530a7f</Sha>
</Dependency>
<Dependency Name="System.Runtime.Numerics.TestData" Version="9.0.0-beta.24119.1">
<Uri>https://github.com/dotnet/runtime-assets</Uri>
@@ -399,19 +399,18 @@
<Dependency Name="Microsoft.CodeAnalysis" Version="4.10.0-2.24114.13">
<Uri>https://github.com/dotnet/roslyn</Uri>
<Sha>77372c66fd54927312b5b0a2e399e192f74445c9</Sha>
- <SourceBuild RepoName="roslyn" ManagedOnly="true" />
</Dependency>
<Dependency Name="Microsoft.CodeAnalysis.CSharp" Version="4.10.0-2.24114.13">
<Uri>https://github.com/dotnet/roslyn</Uri>
<Sha>77372c66fd54927312b5b0a2e399e192f74445c9</Sha>
</Dependency>
- <Dependency Name="Microsoft.CodeAnalysis.Analyzers" Version="3.11.0-beta1.24121.1">
+ <Dependency Name="Microsoft.CodeAnalysis.Analyzers" Version="3.11.0-beta1.24122.2">
<Uri>https://github.com/dotnet/roslyn-analyzers</Uri>
- <Sha>4195460a822168a75aa3d31b4a8d0fa88c42855c</Sha>
+ <Sha>ba8b7f2c3ae092d0301f0c5e49bd30340af553c8</Sha>
</Dependency>
- <Dependency Name="Microsoft.CodeAnalysis.NetAnalyzers" Version="9.0.0-preview.24121.1">
+ <Dependency Name="Microsoft.CodeAnalysis.NetAnalyzers" Version="9.0.0-preview.24122.2">
<Uri>https://github.com/dotnet/roslyn-analyzers</Uri>
- <Sha>4195460a822168a75aa3d31b4a8d0fa88c42855c</Sha>
+ <Sha>ba8b7f2c3ae092d0301f0c5e49bd30340af553c8</Sha>
</Dependency>
<!-- Intermediate is necessary for source build. -->
<Dependency Name="Microsoft.SourceBuild.Intermediate.roslyn" Version="4.10.0-2.24114.13">
@@ -419,14 +418,14 @@
<Sha>77372c66fd54927312b5b0a2e399e192f74445c9</Sha>
<SourceBuild RepoName="roslyn" ManagedOnly="true" />
</Dependency>
- <Dependency Name="Microsoft.DotNet.ApiCompat.Task" Version="9.0.100-preview.2.24118.3">
+ <Dependency Name="Microsoft.DotNet.ApiCompat.Task" Version="9.0.100-preview.3.24126.1">
<Uri>https://github.com/dotnet/sdk</Uri>
- <Sha>b834bb25bdf308c4971d00cef6b726dfaa828c66</Sha>
+ <Sha>7900db19bd7d1a384490909f085cec371ec696d2</Sha>
</Dependency>
<!-- Intermediate is necessary for source build. -->
- <Dependency Name="Microsoft.SourceBuild.Intermediate.sdk" Version="9.0.100-preview.2.24118.3">
+ <Dependency Name="Microsoft.SourceBuild.Intermediate.sdk" Version="9.0.100-preview.3.24126.1">
<Uri>https://github.com/dotnet/sdk</Uri>
- <Sha>b834bb25bdf308c4971d00cef6b726dfaa828c66</Sha>
+ <Sha>7900db19bd7d1a384490909f085cec371ec696d2</Sha>
<SourceBuild RepoName="sdk" ManagedOnly="true" />
</Dependency>
<Dependency Name="optimization.windows_nt-arm64.MIBC.Runtime" Version="1.0.0-prerelease.24104.2">
@@ -443,9 +442,9 @@
<Uri>https://github.com/NuGet/NuGet.Client</Uri>
<Sha>8fef55f5a55a3b4f2c96cd1a9b5ddc51d4b927f8</Sha>
</Dependency>
- <Dependency Name="Microsoft.Dotnet.Sdk.Internal" Version="9.0.100-preview.2.24116.21">
+ <Dependency Name="Microsoft.Dotnet.Sdk.Internal" Version="9.0.100-preview.3.24126.2">
<Uri>https://github.com/dotnet/installer</Uri>
- <Sha>e6b3ff2dff85b43bd3a323e7c0bac4f1f58ccd62</Sha>
+ <Sha>d070660282eb5f78497310f77093638744112e03</Sha>
</Dependency>
</ToolsetDependencies>
</Dependencies>
diff --git a/eng/Versions.props b/eng/Versions.props
index 5ed70ea8a78a..68860fb5cbad 100644
--- a/eng/Versions.props
+++ b/eng/Versions.props
@@ -7,11 +7,11 @@
<MinorVersion>0</MinorVersion>
<PatchVersion>0</PatchVersion>
<SdkBandVersion>9.0.100</SdkBandVersion>
- <PackageVersionNet8>8.0.0</PackageVersionNet8>
+ <PackageVersionNet8>8.0.2</PackageVersionNet8>
<PackageVersionNet7>7.0.$([MSBuild]::Add($([System.Version]::Parse('$(PackageVersionNet8)').Build),14))</PackageVersionNet7>
<PackageVersionNet6>6.0.$([MSBuild]::Add($([System.Version]::Parse('$(PackageVersionNet7)').Build),11))</PackageVersionNet6>
<PreReleaseVersionLabel>preview</PreReleaseVersionLabel>
- <PreReleaseVersionIteration>2</PreReleaseVersionIteration>
+ <PreReleaseVersionIteration>3</PreReleaseVersionIteration>
<!-- Enable to remove prerelease label. -->
<StabilizePackageVersion Condition="'$(StabilizePackageVersion)' == ''">false</StabilizePackageVersion>
<DotNetFinalVersionKind Condition="'$(StabilizePackageVersion)' == 'true'">release</DotNetFinalVersionKind>
@@ -34,8 +34,8 @@
</ItemGroup>
<PropertyGroup>
<!-- dotnet/roslyn-analyzers dependencies -->
- <MicrosoftCodeAnalysisAnalyzersVersion>3.11.0-beta1.24121.1</MicrosoftCodeAnalysisAnalyzersVersion>
- <MicrosoftCodeAnalysisNetAnalyzersVersion>9.0.0-preview.24121.1</MicrosoftCodeAnalysisNetAnalyzersVersion>
+ <MicrosoftCodeAnalysisAnalyzersVersion>3.11.0-beta1.24122.2</MicrosoftCodeAnalysisAnalyzersVersion>
+ <MicrosoftCodeAnalysisNetAnalyzersVersion>9.0.0-preview.24122.2</MicrosoftCodeAnalysisNetAnalyzersVersion>
<!-- dotnet/roslyn dependencies -->
<!--
These versions should not be used by any project that contributes to the design-time experience in VS, such as an analyzer, code-fix, or generator assembly.
@@ -81,33 +81,33 @@
<PropertyGroup>
<StaticCsVersion>0.2.0</StaticCsVersion>
<!-- SDK dependencies -->
- <MicrosoftDotNetApiCompatTaskVersion>9.0.100-preview.2.24118.3</MicrosoftDotNetApiCompatTaskVersion>
+ <MicrosoftDotNetApiCompatTaskVersion>9.0.100-preview.3.24126.1</MicrosoftDotNetApiCompatTaskVersion>
<!-- Arcade dependencies -->
- <MicrosoftDotNetBuildTasksFeedVersion>9.0.0-beta.24112.1</MicrosoftDotNetBuildTasksFeedVersion>
- <MicrosoftDotNetCodeAnalysisVersion>9.0.0-beta.24112.1</MicrosoftDotNetCodeAnalysisVersion>
- <MicrosoftDotNetGenAPIVersion>9.0.0-beta.24112.1</MicrosoftDotNetGenAPIVersion>
- <MicrosoftDotNetGenFacadesVersion>9.0.0-beta.24112.1</MicrosoftDotNetGenFacadesVersion>
- <MicrosoftDotNetXUnitAssertVersion>2.6.7-beta.24112.1</MicrosoftDotNetXUnitAssertVersion>
- <MicrosoftDotNetXUnitExtensionsVersion>9.0.0-beta.24112.1</MicrosoftDotNetXUnitExtensionsVersion>
- <MicrosoftDotNetXUnitConsoleRunnerVersion>2.6.7-beta.24112.1</MicrosoftDotNetXUnitConsoleRunnerVersion>
- <MicrosoftDotNetBuildTasksArchivesVersion>9.0.0-beta.24112.1</MicrosoftDotNetBuildTasksArchivesVersion>
- <MicrosoftDotNetBuildTasksInstallersVersion>9.0.0-beta.24112.1</MicrosoftDotNetBuildTasksInstallersVersion>
- <MicrosoftDotNetBuildTasksPackagingVersion>9.0.0-beta.24112.1</MicrosoftDotNetBuildTasksPackagingVersion>
- <MicrosoftDotNetBuildTasksTargetFrameworkVersion>9.0.0-beta.24112.1</MicrosoftDotNetBuildTasksTargetFrameworkVersion>
- <MicrosoftDotNetBuildTasksTemplatingVersion>9.0.0-beta.24112.1</MicrosoftDotNetBuildTasksTemplatingVersion>
- <MicrosoftDotNetBuildTasksWorkloadsPackageVersion>9.0.0-beta.24112.1</MicrosoftDotNetBuildTasksWorkloadsPackageVersion>
- <MicrosoftDotNetRemoteExecutorVersion>9.0.0-beta.24112.1</MicrosoftDotNetRemoteExecutorVersion>
- <MicrosoftDotNetVersionToolsTasksVersion>9.0.0-beta.24112.1</MicrosoftDotNetVersionToolsTasksVersion>
- <MicrosoftDotNetPackageTestingVersion>9.0.0-beta.24112.1</MicrosoftDotNetPackageTestingVersion>
+ <MicrosoftDotNetBuildTasksFeedVersion>9.0.0-beta.24151.1</MicrosoftDotNetBuildTasksFeedVersion>
+ <MicrosoftDotNetCodeAnalysisVersion>9.0.0-beta.24151.1</MicrosoftDotNetCodeAnalysisVersion>
+ <MicrosoftDotNetGenAPIVersion>9.0.0-beta.24151.1</MicrosoftDotNetGenAPIVersion>
+ <MicrosoftDotNetGenFacadesVersion>9.0.0-beta.24151.1</MicrosoftDotNetGenFacadesVersion>
+ <MicrosoftDotNetXUnitAssertVersion>2.6.7-beta.24151.1</MicrosoftDotNetXUnitAssertVersion>
+ <MicrosoftDotNetXUnitExtensionsVersion>9.0.0-beta.24151.1</MicrosoftDotNetXUnitExtensionsVersion>
+ <MicrosoftDotNetXUnitConsoleRunnerVersion>2.6.7-beta.24151.1</MicrosoftDotNetXUnitConsoleRunnerVersion>
+ <MicrosoftDotNetBuildTasksArchivesVersion>9.0.0-beta.24151.1</MicrosoftDotNetBuildTasksArchivesVersion>
+ <MicrosoftDotNetBuildTasksInstallersVersion>9.0.0-beta.24151.1</MicrosoftDotNetBuildTasksInstallersVersion>
+ <MicrosoftDotNetBuildTasksPackagingVersion>9.0.0-beta.24151.1</MicrosoftDotNetBuildTasksPackagingVersion>
+ <MicrosoftDotNetBuildTasksTargetFrameworkVersion>9.0.0-beta.24151.1</MicrosoftDotNetBuildTasksTargetFrameworkVersion>
+ <MicrosoftDotNetBuildTasksTemplatingVersion>9.0.0-beta.24151.1</MicrosoftDotNetBuildTasksTemplatingVersion>
+ <MicrosoftDotNetBuildTasksWorkloadsPackageVersion>9.0.0-beta.24151.1</MicrosoftDotNetBuildTasksWorkloadsPackageVersion>
+ <MicrosoftDotNetRemoteExecutorVersion>9.0.0-beta.24151.1</MicrosoftDotNetRemoteExecutorVersion>
+ <MicrosoftDotNetVersionToolsTasksVersion>9.0.0-beta.24151.1</MicrosoftDotNetVersionToolsTasksVersion>
+ <MicrosoftDotNetPackageTestingVersion>9.0.0-beta.24151.1</MicrosoftDotNetPackageTestingVersion>
<!-- TODO: Remove pinned xunit.analyzers version: https://github.com/dotnet/runtime/issues/97088 -->
<XUnitAnalyzersVersion>1.4.0</XUnitAnalyzersVersion>
<!-- NuGet dependencies -->
<NuGetBuildTasksPackVersion>6.0.0-preview.1.102</NuGetBuildTasksPackVersion>
<!-- Installer dependencies -->
- <MicrosoftNETCoreAppRuntimewinx64Version>9.0.0-preview.2.24116.2</MicrosoftNETCoreAppRuntimewinx64Version>
+ <MicrosoftNETCoreAppRuntimewinx64Version>9.0.0-preview.3.24126.1</MicrosoftNETCoreAppRuntimewinx64Version>
<MicrosoftExtensionsDependencyModelVersion>6.0.0</MicrosoftExtensionsDependencyModelVersion>
<!-- ILAsm dependencies -->
- <MicrosoftNETCoreILAsmVersion>9.0.0-preview.2.24116.2</MicrosoftNETCoreILAsmVersion>
+ <MicrosoftNETCoreILAsmVersion>9.0.0-preview.3.24126.1</MicrosoftNETCoreILAsmVersion>
<!-- ObjWriter dependencies -->
<runtimelinuxarm64MicrosoftNETCoreRuntimeObjWriterVersion>16.0.5-alpha.1.24112.1</runtimelinuxarm64MicrosoftNETCoreRuntimeObjWriterVersion>
<runtimelinuxx64MicrosoftNETCoreRuntimeObjWriterVersion>16.0.5-alpha.1.24112.1</runtimelinuxx64MicrosoftNETCoreRuntimeObjWriterVersion>
@@ -128,19 +128,19 @@
<SystemDrawingCommonVersion>8.0.0</SystemDrawingCommonVersion>
<SystemIOFileSystemAccessControlVersion>5.0.0</SystemIOFileSystemAccessControlVersion>
<SystemMemoryVersion>4.5.5</SystemMemoryVersion>
- <SystemReflectionMetadataVersion>9.0.0-preview.2.24116.2</SystemReflectionMetadataVersion>
- <SystemReflectionMetadataLoadContextVersion>9.0.0-preview.2.24116.2</SystemReflectionMetadataLoadContextVersion>
+ <SystemReflectionMetadataVersion>9.0.0-preview.3.24126.1</SystemReflectionMetadataVersion>
+ <SystemReflectionMetadataLoadContextVersion>9.0.0-preview.3.24126.1</SystemReflectionMetadataLoadContextVersion>
<SystemSecurityAccessControlVersion>6.0.0</SystemSecurityAccessControlVersion>
<SystemSecurityCryptographyCngVersion>5.0.0</SystemSecurityCryptographyCngVersion>
<SystemSecurityCryptographyOpenSslVersion>5.0.0</SystemSecurityCryptographyOpenSslVersion>
<SystemSecurityPrincipalWindowsVersion>5.0.0</SystemSecurityPrincipalWindowsVersion>
<SystemSecurityPermissionsVersion>7.0.0</SystemSecurityPermissionsVersion>
- <SystemTextJsonVersion>9.0.0-preview.2.24116.2</SystemTextJsonVersion>
+ <SystemTextJsonVersion>9.0.0-preview.3.24126.1</SystemTextJsonVersion>
<SystemRuntimeCompilerServicesUnsafeVersion>6.0.0</SystemRuntimeCompilerServicesUnsafeVersion>
<SystemThreadingAccessControlVersion>7.0.0</SystemThreadingAccessControlVersion>
<SystemThreadingTasksExtensionsVersion>4.5.4</SystemThreadingTasksExtensionsVersion>
<SystemValueTupleVersion>4.5.0</SystemValueTupleVersion>
- <runtimenativeSystemIOPortsVersion>9.0.0-preview.2.24116.2</runtimenativeSystemIOPortsVersion>
+ <runtimenativeSystemIOPortsVersion>9.0.0-preview.3.24126.1</runtimenativeSystemIOPortsVersion>
<!-- Keep toolset versions in sync with dotnet/msbuild and dotnet/sdk -->
<SystemCollectionsImmutableToolsetVersion>8.0.0</SystemCollectionsImmutableToolsetVersion>
<SystemReflectionMetadataToolsetVersion>8.0.0</SystemReflectionMetadataToolsetVersion>
@@ -190,7 +190,7 @@
<MicrosoftDotNetXHarnessTestRunnersCommonVersion>9.0.0-prerelease.24119.1</MicrosoftDotNetXHarnessTestRunnersCommonVersion>
<MicrosoftDotNetXHarnessTestRunnersXunitVersion>9.0.0-prerelease.24119.1</MicrosoftDotNetXHarnessTestRunnersXunitVersion>
<MicrosoftDotNetXHarnessCLIVersion>9.0.0-prerelease.24119.1</MicrosoftDotNetXHarnessCLIVersion>
- <MicrosoftDotNetHotReloadUtilsGeneratorBuildToolVersion>9.0.0-alpha.0.24119.1</MicrosoftDotNetHotReloadUtilsGeneratorBuildToolVersion>
+ <MicrosoftDotNetHotReloadUtilsGeneratorBuildToolVersion>9.0.0-alpha.0.24120.1</MicrosoftDotNetHotReloadUtilsGeneratorBuildToolVersion>
<NUnitVersion>3.12.0</NUnitVersion>
<NUnit3TestAdapterVersion>4.5.0</NUnit3TestAdapterVersion>
<CoverletCollectorVersion>6.0.0</CoverletCollectorVersion>
@@ -216,11 +216,11 @@
<!-- Docs -->
<MicrosoftPrivateIntellisenseVersion>8.0.0-preview-20230918.1</MicrosoftPrivateIntellisenseVersion>
<!-- Mono Cecil -->
- <MicrosoftDotNetCecilVersion>0.11.4-alpha.24119.1</MicrosoftDotNetCecilVersion>
+ <MicrosoftDotNetCecilVersion>0.11.4-alpha.24120.1</MicrosoftDotNetCecilVersion>
<!-- ILCompiler -->
- <MicrosoftDotNetILCompilerVersion>9.0.0-preview.2.24116.2</MicrosoftDotNetILCompilerVersion>
+ <MicrosoftDotNetILCompilerVersion>9.0.0-preview.3.24126.1</MicrosoftDotNetILCompilerVersion>
<!-- ICU -->
- <MicrosoftNETCoreRuntimeICUTransportVersion>9.0.0-preview.2.24119.1</MicrosoftNETCoreRuntimeICUTransportVersion>
+ <MicrosoftNETCoreRuntimeICUTransportVersion>9.0.0-preview.3.24123.1</MicrosoftNETCoreRuntimeICUTransportVersion>
<!-- MsQuic -->
<MicrosoftNativeQuicMsQuicVersion>2.2.3</MicrosoftNativeQuicMsQuicVersion>
<SystemNetMsQuicTransportVersion>9.0.0-alpha.1.24067.1</SystemNetMsQuicTransportVersion>
@@ -243,9 +243,9 @@
Note: when the name is updated, make sure to update dependency name in eng/pipelines/common/xplat-setup.yml
like - DarcDependenciesChanged.Microsoft_NET_Workload_Emscripten_Current_Manifest-9_0_100_Transport
-->
- <MicrosoftNETWorkloadEmscriptenCurrentManifest90100TransportVersion>9.0.0-preview.2.24121.1</MicrosoftNETWorkloadEmscriptenCurrentManifest90100TransportVersion>
+ <MicrosoftNETWorkloadEmscriptenCurrentManifest90100TransportVersion>9.0.0-preview.3.24126.1</MicrosoftNETWorkloadEmscriptenCurrentManifest90100TransportVersion>
<MicrosoftNETRuntimeEmscriptenVersion>$(MicrosoftNETWorkloadEmscriptenCurrentManifest90100TransportVersion)</MicrosoftNETRuntimeEmscriptenVersion>
- <MicrosoftNETRuntimeEmscripten3134Pythonwinx64Version>9.0.0-preview.2.24121.1</MicrosoftNETRuntimeEmscripten3134Pythonwinx64Version>
+ <MicrosoftNETRuntimeEmscripten3134Pythonwinx64Version>9.0.0-preview.3.24126.1</MicrosoftNETRuntimeEmscripten3134Pythonwinx64Version>
<!-- workloads -->
<SwixPackageVersion>1.1.87-gba258badda</SwixPackageVersion>
<WixPackageVersion>1.0.0-v3.14.0.5722</WixPackageVersion>
@@ -262,7 +262,7 @@
<MicrosoftExtensionsLoggingVersion>3.1.7</MicrosoftExtensionsLoggingVersion>
<MicrosoftSymbolStoreVersion>1.0.406601</MicrosoftSymbolStoreVersion>
<!-- installer version, for testing workloads -->
- <MicrosoftDotnetSdkInternalVersion>9.0.100-preview.2.24116.21</MicrosoftDotnetSdkInternalVersion>
+ <MicrosoftDotnetSdkInternalVersion>9.0.100-preview.3.24126.2</MicrosoftDotnetSdkInternalVersion>
<SdkVersionForWorkloadTesting>$(MicrosoftDotnetSdkInternalVersion)</SdkVersionForWorkloadTesting>
</PropertyGroup>
</Project>
diff --git a/eng/build.ps1 b/eng/build.ps1
index db18267f33e1..be88dcb263e8 100644
--- a/eng/build.ps1
+++ b/eng/build.ps1
@@ -325,6 +325,9 @@ if ($env:TreatWarningsAsErrors -eq 'false') {
$arguments += " -warnAsError 0"
}
+# disable terminal logger for now: https://github.com/dotnet/runtime/issues/97211
+$arguments += " /tl:false"
+
# Disable targeting pack caching as we reference a partially constructed targeting pack and update it later.
# The later changes are ignored when using the cache.
$env:DOTNETSDK_ALLOW_TARGETING_PACK_CACHING=0
diff --git a/eng/build.sh b/eng/build.sh
index 67f3cfeea472..75fe2cdc39c5 100755
--- a/eng/build.sh
+++ b/eng/build.sh
@@ -553,6 +553,9 @@ if [[ "${TreatWarningsAsErrors:-}" == "false" ]]; then
arguments="$arguments -warnAsError 0"
fi
+# disable terminal logger for now: https://github.com/dotnet/runtime/issues/97211
+arguments="$arguments -tl:false"
+
initDistroRid "$os" "$arch" "$crossBuild"
# Disable targeting pack caching as we reference a partially constructed targeting pack and update it later.
diff --git a/eng/common/templates-official/job/job.yml b/eng/common/templates-official/job/job.yml
new file mode 100644
index 000000000000..352607308fd5
--- /dev/null
+++ b/eng/common/templates-official/job/job.yml
@@ -0,0 +1,255 @@
+# Internal resources (telemetry, microbuild) can only be accessed from non-public projects,
+# and some (Microbuild) should only be applied to non-PR cases for internal builds.
+
+parameters:
+# Job schema parameters - https://docs.microsoft.com/en-us/azure/devops/pipelines/yaml-schema?view=vsts&tabs=schema#job
+ cancelTimeoutInMinutes: ''
+ condition: ''
+ container: ''
+ continueOnError: false
+ dependsOn: ''
+ displayName: ''
+ pool: ''
+ steps: []
+ strategy: ''
+ timeoutInMinutes: ''
+ variables: []
+ workspace: ''
+
+# Job base template specific parameters
+ # See schema documentation - https://github.com/dotnet/arcade/blob/master/Documentation/AzureDevOps/TemplateSchema.md
+ artifacts: ''
+ enableMicrobuild: false
+ enablePublishBuildArtifacts: false
+ enablePublishBuildAssets: false
+ enablePublishTestResults: false
+ enablePublishUsingPipelines: false
+ enableBuildRetry: false
+ disableComponentGovernance: ''
+ componentGovernanceIgnoreDirectories: ''
+ mergeTestResults: false
+ testRunTitle: ''
+ testResultsFormat: ''
+ name: ''
+ preSteps: []
+ runAsPublic: false
+# Sbom related params
+ enableSbom: true
+ PackageVersion: 7.0.0
+ BuildDropPath: '$(Build.SourcesDirectory)/artifacts'
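+
+# For illustration only: a hedged sketch of how a pipeline might consume this
+# template. The job name, pool, and step below are placeholders, not values
+# used by this repository.
+#
+# jobs:
+# - template: /eng/common/templates-official/job/job.yml
+#   parameters:
+#     name: Build_Windows
+#     pool:
+#       name: SomeAgentPool
+#     steps:
+#     - script: echo build steps go here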
+
+jobs:
+- job: ${{ parameters.name }}
+
+ ${{ if ne(parameters.cancelTimeoutInMinutes, '') }}:
+ cancelTimeoutInMinutes: ${{ parameters.cancelTimeoutInMinutes }}
+
+ ${{ if ne(parameters.condition, '') }}:
+ condition: ${{ parameters.condition }}
+
+ ${{ if ne(parameters.container, '') }}:
+ container: ${{ parameters.container }}
+
+ ${{ if ne(parameters.continueOnError, '') }}:
+ continueOnError: ${{ parameters.continueOnError }}
+
+ ${{ if ne(parameters.dependsOn, '') }}:
+ dependsOn: ${{ parameters.dependsOn }}
+
+ ${{ if ne(parameters.displayName, '') }}:
+ displayName: ${{ parameters.displayName }}
+
+ ${{ if ne(parameters.pool, '') }}:
+ pool: ${{ parameters.pool }}
+
+ ${{ if ne(parameters.strategy, '') }}:
+ strategy: ${{ parameters.strategy }}
+
+ ${{ if ne(parameters.timeoutInMinutes, '') }}:
+ timeoutInMinutes: ${{ parameters.timeoutInMinutes }}
+
+ variables:
+ - ${{ if ne(parameters.enableTelemetry, 'false') }}:
+ - name: DOTNET_CLI_TELEMETRY_PROFILE
+ value: '$(Build.Repository.Uri)'
+ - ${{ if eq(parameters.enableRichCodeNavigation, 'true') }}:
+ - name: EnableRichCodeNavigation
+ value: 'true'
+ # Retry signature validation up to three times, waiting 2 seconds between attempts.
+ # See https://learn.microsoft.com/en-us/nuget/reference/errors-and-warnings/nu3028#retry-untrusted-root-failures
+ - name: NUGET_EXPERIMENTAL_CHAIN_BUILD_RETRY_POLICY
+ value: 3,2000
+ - ${{ each variable in parameters.variables }}:
+ # handle name-value variable syntax
+ # example:
+ # - name: [key]
+ # value: [value]
+ - ${{ if ne(variable.name, '') }}:
+ - name: ${{ variable.name }}
+ value: ${{ variable.value }}
+
+ # handle variable groups
+ - ${{ if ne(variable.group, '') }}:
+ - group: ${{ variable.group }}
+
+ # handle template variable syntax
+ # example:
+ # - template: path/to/template.yml
+ # parameters:
+ # [key]: [value]
+ - ${{ if ne(variable.template, '') }}:
+ - template: ${{ variable.template }}
+ ${{ if ne(variable.parameters, '') }}:
+ parameters: ${{ variable.parameters }}
+
+ # handle key-value variable syntax.
+ # example:
+ # - [key]: [value]
+ - ${{ if and(eq(variable.name, ''), eq(variable.group, ''), eq(variable.template, '')) }}:
+ - ${{ each pair in variable }}:
+ - name: ${{ pair.key }}
+ value: ${{ pair.value }}
+
+ # DotNet-HelixApi-Access provides 'HelixApiAccessToken' for internal builds
+ - ${{ if and(eq(parameters.enableTelemetry, 'true'), eq(parameters.runAsPublic, 'false'), ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}:
+ - group: DotNet-HelixApi-Access
+
+ ${{ if ne(parameters.workspace, '') }}:
+ workspace: ${{ parameters.workspace }}
+
+ steps:
+ - ${{ if ne(parameters.preSteps, '') }}:
+ - ${{ each preStep in parameters.preSteps }}:
+ - ${{ preStep }}
+
+ - ${{ if and(eq(parameters.runAsPublic, 'false'), ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}:
+ - ${{ if eq(parameters.enableMicrobuild, 'true') }}:
+ - task: MicroBuildSigningPlugin@3
+ displayName: Install MicroBuild plugin
+ inputs:
+ signType: $(_SignType)
+ zipSources: false
+ feedSource: https://dnceng.pkgs.visualstudio.com/_packaging/MicroBuildToolset/nuget/v3/index.json
+ env:
+ TeamName: $(_TeamName)
+ continueOnError: ${{ parameters.continueOnError }}
+ condition: and(succeeded(), in(variables['_SignType'], 'real', 'test'), eq(variables['Agent.Os'], 'Windows_NT'))
+
+ - ${{ if and(eq(parameters.runAsPublic, 'false'), eq(variables['System.TeamProject'], 'internal')) }}:
+ - task: NuGetAuthenticate@1
+
+ - ${{ if and(ne(parameters.artifacts.download, 'false'), ne(parameters.artifacts.download, '')) }}:
+ - task: DownloadPipelineArtifact@2
+ inputs:
+ buildType: current
+ artifactName: ${{ coalesce(parameters.artifacts.download.name, 'Artifacts_$(Agent.OS)_$(_BuildConfig)') }}
+ targetPath: ${{ coalesce(parameters.artifacts.download.path, 'artifacts') }}
+ itemPattern: ${{ coalesce(parameters.artifacts.download.pattern, '**') }}
+
+ - ${{ each step in parameters.steps }}:
+ - ${{ step }}
+
+ - ${{ if eq(parameters.enableRichCodeNavigation, true) }}:
+ - task: RichCodeNavIndexer@0
+ displayName: RichCodeNav Upload
+ inputs:
+ languages: ${{ coalesce(parameters.richCodeNavigationLanguage, 'csharp') }}
+ environment: ${{ coalesce(parameters.richCodeNavigationEnvironment, 'internal') }}
+ richNavLogOutputDirectory: $(Build.SourcesDirectory)/artifacts/bin
+ uploadRichNavArtifacts: ${{ coalesce(parameters.richCodeNavigationUploadArtifacts, false) }}
+ continueOnError: true
+
+ - template: /eng/common/templates-official/steps/component-governance.yml
+ parameters:
+ ${{ if eq(parameters.disableComponentGovernance, '') }}:
+ ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest'), eq(parameters.runAsPublic, 'false'), or(startsWith(variables['Build.SourceBranch'], 'refs/heads/release/'), startsWith(variables['Build.SourceBranch'], 'refs/heads/dotnet/'), startsWith(variables['Build.SourceBranch'], 'refs/heads/microsoft/'), eq(variables['Build.SourceBranch'], 'refs/heads/main'))) }}:
+ disableComponentGovernance: false
+ ${{ else }}:
+ disableComponentGovernance: true
+ ${{ else }}:
+ disableComponentGovernance: ${{ parameters.disableComponentGovernance }}
+ componentGovernanceIgnoreDirectories: ${{ parameters.componentGovernanceIgnoreDirectories }}
+
+ - ${{ if eq(parameters.enableMicrobuild, 'true') }}:
+ - ${{ if and(eq(parameters.runAsPublic, 'false'), ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}:
+ - task: MicroBuildCleanup@1
+ displayName: Execute Microbuild cleanup tasks
+ condition: and(always(), in(variables['_SignType'], 'real', 'test'), eq(variables['Agent.Os'], 'Windows_NT'))
+ continueOnError: ${{ parameters.continueOnError }}
+ env:
+ TeamName: $(_TeamName)
+
+ - ${{ if ne(parameters.artifacts.publish, '') }}:
+ - ${{ if and(ne(parameters.artifacts.publish.artifacts, 'false'), ne(parameters.artifacts.publish.artifacts, '')) }}:
+ - task: CopyFiles@2
+ displayName: Gather binaries for publish to artifacts
+ inputs:
+ SourceFolder: 'artifacts/bin'
+ Contents: '**'
+ TargetFolder: '$(Build.ArtifactStagingDirectory)/artifacts/bin'
+ - task: CopyFiles@2
+ displayName: Gather packages for publish to artifacts
+ inputs:
+ SourceFolder: 'artifacts/packages'
+ Contents: '**'
+ TargetFolder: '$(Build.ArtifactStagingDirectory)/artifacts/packages'
+ - task: 1ES.PublishBuildArtifacts@1
+ displayName: Publish pipeline artifacts
+ inputs:
+ PathtoPublish: '$(Build.ArtifactStagingDirectory)/artifacts'
+ PublishLocation: Container
+ ArtifactName: ${{ coalesce(parameters.artifacts.publish.artifacts.name , 'Artifacts_$(Agent.Os)_$(_BuildConfig)') }}
+ continueOnError: true
+ condition: always()
+ - ${{ if and(ne(parameters.artifacts.publish.logs, 'false'), ne(parameters.artifacts.publish.logs, '')) }}:
+ - publish: artifacts/log
+ artifact: ${{ coalesce(parameters.artifacts.publish.logs.name, 'Logs_Build_$(Agent.Os)_$(_BuildConfig)') }}
+ displayName: Publish logs
+ continueOnError: true
+ condition: always()
+
+ - ${{ if ne(parameters.enablePublishBuildArtifacts, 'false') }}:
+ - task: 1ES.PublishBuildArtifacts@1
+ displayName: Publish Logs
+ inputs:
+ PathtoPublish: '$(Build.SourcesDirectory)/artifacts/log/$(_BuildConfig)'
+ PublishLocation: Container
+ ArtifactName: ${{ coalesce(parameters.enablePublishBuildArtifacts.artifactName, '$(Agent.Os)_$(Agent.JobName)' ) }}
+ continueOnError: true
+ condition: always()
+
+ - ${{ if or(and(eq(parameters.enablePublishTestResults, 'true'), eq(parameters.testResultsFormat, '')), eq(parameters.testResultsFormat, 'xunit')) }}:
+ - task: PublishTestResults@2
+ displayName: Publish XUnit Test Results
+ inputs:
+ testResultsFormat: 'xUnit'
+ testResultsFiles: '*.xml'
+ searchFolder: '$(Build.SourcesDirectory)/artifacts/TestResults/$(_BuildConfig)'
+ testRunTitle: ${{ coalesce(parameters.testRunTitle, parameters.name, '$(System.JobName)') }}-xunit
+ mergeTestResults: ${{ parameters.mergeTestResults }}
+ continueOnError: true
+ condition: always()
+ - ${{ if or(and(eq(parameters.enablePublishTestResults, 'true'), eq(parameters.testResultsFormat, '')), eq(parameters.testResultsFormat, 'vstest')) }}:
+ - task: PublishTestResults@2
+ displayName: Publish TRX Test Results
+ inputs:
+ testResultsFormat: 'VSTest'
+ testResultsFiles: '*.trx'
+ searchFolder: '$(Build.SourcesDirectory)/artifacts/TestResults/$(_BuildConfig)'
+ testRunTitle: ${{ coalesce(parameters.testRunTitle, parameters.name, '$(System.JobName)') }}-trx
+ mergeTestResults: ${{ parameters.mergeTestResults }}
+ continueOnError: true
+ condition: always()
+
+ - ${{ if and(eq(parameters.runAsPublic, 'false'), ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest'), eq(parameters.enableSbom, 'true')) }}:
+ - template: /eng/common/templates-official/steps/generate-sbom.yml
+ parameters:
+ PackageVersion: ${{ parameters.packageVersion}}
+ BuildDropPath: ${{ parameters.buildDropPath }}
+ IgnoreDirectories: ${{ parameters.componentGovernanceIgnoreDirectories }}
+
+ - ${{ if eq(parameters.enableBuildRetry, 'true') }}:
+ - publish: $(Build.SourcesDirectory)\eng\common\BuildConfiguration
+ artifact: BuildConfiguration
+ displayName: Publish build retry configuration
+ continueOnError: true
diff --git a/eng/common/templates-official/job/onelocbuild.yml b/eng/common/templates-official/job/onelocbuild.yml
new file mode 100644
index 000000000000..ba9ba4930329
--- /dev/null
+++ b/eng/common/templates-official/job/onelocbuild.yml
@@ -0,0 +1,112 @@
+parameters:
+ # Optional: dependencies of the job
+ dependsOn: ''
+
+ # Optional: A defined YAML pool - https://docs.microsoft.com/en-us/azure/devops/pipelines/yaml-schema?view=vsts&tabs=schema#pool
+ pool: ''
+
+ CeapexPat: $(dn-bot-ceapex-package-r) # PAT for the loc AzDO instance https://dev.azure.com/ceapex
+ GithubPat: $(BotAccount-dotnet-bot-repo-PAT)
+
+ SourcesDirectory: $(Build.SourcesDirectory)
+ CreatePr: true
+ AutoCompletePr: false
+ ReusePr: true
+ UseLfLineEndings: true
+ UseCheckedInLocProjectJson: false
+ SkipLocProjectJsonGeneration: false
+ LanguageSet: VS_Main_Languages
+ LclSource: lclFilesInRepo
+ LclPackageId: ''
+ RepoType: gitHub
+ GitHubOrg: dotnet
+ MirrorRepo: ''
+ MirrorBranch: main
+ condition: ''
+ JobNameSuffix: ''
+
+jobs:
+- job: OneLocBuild${{ parameters.JobNameSuffix }}
+
+ dependsOn: ${{ parameters.dependsOn }}
+
+ displayName: OneLocBuild${{ parameters.JobNameSuffix }}
+
+ variables:
+ - group: OneLocBuildVariables # Contains the CeapexPat and GithubPat
+ - name: _GenerateLocProjectArguments
+ value: -SourcesDirectory ${{ parameters.SourcesDirectory }}
+ -LanguageSet "${{ parameters.LanguageSet }}"
+ -CreateNeutralXlfs
+ - ${{ if eq(parameters.UseCheckedInLocProjectJson, 'true') }}:
+ - name: _GenerateLocProjectArguments
+ value: ${{ variables._GenerateLocProjectArguments }} -UseCheckedInLocProjectJson
+ - template: /eng/common/templates-official/variables/pool-providers.yml
+
+ ${{ if ne(parameters.pool, '') }}:
+ pool: ${{ parameters.pool }}
+ ${{ if eq(parameters.pool, '') }}:
+ pool:
+ # We don't use the collection uri here because it might vary (.visualstudio.com vs. dev.azure.com)
+ ${{ if eq(variables['System.TeamProject'], 'DevDiv') }}:
+ name: AzurePipelines-EO
+ image: 1ESPT-Windows2022
+ demands: Cmd
+ os: windows
+ # If it's not devdiv, it's dnceng
+ ${{ if ne(variables['System.TeamProject'], 'DevDiv') }}:
+ name: $(DncEngInternalBuildPool)
+ image: 1es-windows-2022-pt
+ os: windows
+
+ steps:
+ - ${{ if ne(parameters.SkipLocProjectJsonGeneration, 'true') }}:
+ - task: Powershell@2
+ inputs:
+ filePath: $(Build.SourcesDirectory)/eng/common/generate-locproject.ps1
+ arguments: $(_GenerateLocProjectArguments)
+ displayName: Generate LocProject.json
+ condition: ${{ parameters.condition }}
+
+ - task: OneLocBuild@2
+ displayName: OneLocBuild
+ env:
+ SYSTEM_ACCESSTOKEN: $(System.AccessToken)
+ inputs:
+ locProj: eng/Localize/LocProject.json
+ outDir: $(Build.ArtifactStagingDirectory)
+ lclSource: ${{ parameters.LclSource }}
+ lclPackageId: ${{ parameters.LclPackageId }}
+ isCreatePrSelected: ${{ parameters.CreatePr }}
+ isAutoCompletePrSelected: ${{ parameters.AutoCompletePr }}
+ ${{ if eq(parameters.CreatePr, true) }}:
+ isUseLfLineEndingsSelected: ${{ parameters.UseLfLineEndings }}
+ ${{ if eq(parameters.RepoType, 'gitHub') }}:
+ isShouldReusePrSelected: ${{ parameters.ReusePr }}
+ packageSourceAuth: patAuth
+ patVariable: ${{ parameters.CeapexPat }}
+ ${{ if eq(parameters.RepoType, 'gitHub') }}:
+ repoType: ${{ parameters.RepoType }}
+ gitHubPatVariable: "${{ parameters.GithubPat }}"
+ ${{ if ne(parameters.MirrorRepo, '') }}:
+ isMirrorRepoSelected: true
+ gitHubOrganization: ${{ parameters.GitHubOrg }}
+ mirrorRepo: ${{ parameters.MirrorRepo }}
+ mirrorBranch: ${{ parameters.MirrorBranch }}
+ condition: ${{ parameters.condition }}
+
+ - task: 1ES.PublishBuildArtifacts@1
+ displayName: Publish Localization Files
+ inputs:
+ PathtoPublish: '$(Build.ArtifactStagingDirectory)/loc'
+ PublishLocation: Container
+ ArtifactName: Loc
+ condition: ${{ parameters.condition }}
+
+ - task: 1ES.PublishBuildArtifacts@1
+ displayName: Publish LocProject.json
+ inputs:
+ PathtoPublish: '$(Build.SourcesDirectory)/eng/Localize/'
+ PublishLocation: Container
+ ArtifactName: Loc
+ condition: ${{ parameters.condition }} \ No newline at end of file
diff --git a/eng/common/templates-official/job/publish-build-assets.yml b/eng/common/templates-official/job/publish-build-assets.yml
new file mode 100644
index 000000000000..432dc92ab27c
--- /dev/null
+++ b/eng/common/templates-official/job/publish-build-assets.yml
@@ -0,0 +1,157 @@
+parameters:
+ configuration: 'Debug'
+
+ # Optional: condition for the job to run
+ condition: ''
+
+ # Optional: 'true' if future jobs should run even if this job fails
+ continueOnError: false
+
+ # Optional: dependencies of the job
+ dependsOn: ''
+
+ # Optional: Include PublishBuildArtifacts task
+ enablePublishBuildArtifacts: false
+
+ # Optional: A defined YAML pool - https://docs.microsoft.com/en-us/azure/devops/pipelines/yaml-schema?view=vsts&tabs=schema#pool
+ pool: {}
+
+ # Optional: should run as a public build even in the internal project
+ # if 'true', the build won't run any of the internal only steps, even if it is running in non-public projects.
+ runAsPublic: false
+
+ # Optional: whether the build's artifacts will be published using release pipelines or direct feed publishing
+ publishUsingPipelines: false
+
+ # Optional: whether to publish the build's assets immediately as part of this job, rather than deferring to a separate promotion pipeline
+ publishAssetsImmediately: false
+
+ artifactsPublishingAdditionalParameters: ''
+
+ signingValidationAdditionalParameters: ''
+
+jobs:
+- job: Asset_Registry_Publish
+
+ dependsOn: ${{ parameters.dependsOn }}
+ timeoutInMinutes: 150
+
+ ${{ if eq(parameters.publishAssetsImmediately, 'true') }}:
+ displayName: Publish Assets
+ ${{ else }}:
+ displayName: Publish to Build Asset Registry
+
+ variables:
+ - template: /eng/common/templates-official/variables/pool-providers.yml
+ - ${{ if and(eq(parameters.runAsPublic, 'false'), ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}:
+ - group: Publish-Build-Assets
+ - group: AzureDevOps-Artifact-Feeds-Pats
+ - name: runCodesignValidationInjection
+ value: false
+ # unconditional - needed for logs publishing (redactor tool version)
+ - template: /eng/common/templates-official/post-build/common-variables.yml
+
+ pool:
+ # We don't use the collection uri here because it might vary (.visualstudio.com vs. dev.azure.com)
+ ${{ if eq(variables['System.TeamProject'], 'DevDiv') }}:
+ name: AzurePipelines-EO
+ image: 1ESPT-WINDOWS2022
+ demands: Cmd
+ os: windows
+ # If it's not devdiv, it's dnceng
+ ${{ if ne(variables['System.TeamProject'], 'DevDiv') }}:
+ name: $(DncEngInternalBuildPool)
+ image: 1es-windows-2022-pt
+ os: windows
+ steps:
+ - ${{ if and(eq(parameters.runAsPublic, 'false'), ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}:
+ - checkout: self
+ fetchDepth: 3
+ clean: true
+
+ - task: DownloadBuildArtifacts@0
+ displayName: Download artifact
+ inputs:
+ artifactName: AssetManifests
+ downloadPath: '$(Build.StagingDirectory)/Download'
+ checkDownloadedFiles: true
+ condition: ${{ parameters.condition }}
+ continueOnError: ${{ parameters.continueOnError }}
+
+ - task: NuGetAuthenticate@1
+
+ - task: PowerShell@2
+ displayName: Publish Build Assets
+ inputs:
+ filePath: eng\common\sdk-task.ps1
+ arguments: -task PublishBuildAssets -restore -msbuildEngine dotnet
+ /p:ManifestsPath='$(Build.StagingDirectory)/Download/AssetManifests'
+ /p:BuildAssetRegistryToken=$(MaestroAccessToken)
+ /p:MaestroApiEndpoint=https://maestro.dot.net
+ /p:PublishUsingPipelines=${{ parameters.publishUsingPipelines }}
+ /p:OfficialBuildId=$(Build.BuildNumber)
+ condition: ${{ parameters.condition }}
+ continueOnError: ${{ parameters.continueOnError }}
+
+ - task: powershell@2
+ displayName: Create ReleaseConfigs Artifact
+ inputs:
+ targetType: inline
+ script: |
+ Add-Content -Path "$(Build.StagingDirectory)/ReleaseConfigs.txt" -Value $(BARBuildId)
+ Add-Content -Path "$(Build.StagingDirectory)/ReleaseConfigs.txt" -Value "$(DefaultChannels)"
+ Add-Content -Path "$(Build.StagingDirectory)/ReleaseConfigs.txt" -Value $(IsStableBuild)
+
+ - task: 1ES.PublishBuildArtifacts@1
+ displayName: Publish ReleaseConfigs Artifact
+ inputs:
+ PathtoPublish: '$(Build.StagingDirectory)/ReleaseConfigs.txt'
+ PublishLocation: Container
+ ArtifactName: ReleaseConfigs
+
+ - task: powershell@2
+ displayName: Check if SymbolPublishingExclusionsFile.txt exists
+ inputs:
+ targetType: inline
+ script: |
+ $symbolExclusionfile = "$(Build.SourcesDirectory)/eng/SymbolPublishingExclusionsFile.txt"
+ if(Test-Path -Path $symbolExclusionfile)
+ {
+ Write-Host "SymbolExclusionFile exists"
+ Write-Host "##vso[task.setvariable variable=SymbolExclusionFile]true"
+ }
+ else{
+ Write-Host "Symbols Exclusion file does not exists"
+ Write-Host "##vso[task.setvariable variable=SymbolExclusionFile]false"
+ }
+
+ - task: 1ES.PublishBuildArtifacts@1
+ displayName: Publish SymbolPublishingExclusionsFile Artifact
+ condition: eq(variables['SymbolExclusionFile'], 'true')
+ inputs:
+ PathtoPublish: '$(Build.SourcesDirectory)/eng/SymbolPublishingExclusionsFile.txt'
+ PublishLocation: Container
+ ArtifactName: ReleaseConfigs
+
+ - ${{ if eq(parameters.publishAssetsImmediately, 'true') }}:
+ - template: /eng/common/templates-official/post-build/setup-maestro-vars.yml
+ parameters:
+ BARBuildId: ${{ parameters.BARBuildId }}
+ PromoteToChannelIds: ${{ parameters.PromoteToChannelIds }}
+
+ - task: PowerShell@2
+ displayName: Publish Using Darc
+ inputs:
+ filePath: $(Build.SourcesDirectory)/eng/common/post-build/publish-using-darc.ps1
+ arguments: -BuildId $(BARBuildId)
+ -PublishingInfraVersion 3
+ -AzdoToken '$(publishing-dnceng-devdiv-code-r-build-re)'
+ -MaestroToken '$(MaestroApiAccessToken)'
+ -WaitPublishingFinish true
+ -ArtifactsPublishingAdditionalParameters '${{ parameters.artifactsPublishingAdditionalParameters }}'
+ -SymbolPublishingAdditionalParameters '${{ parameters.symbolPublishingAdditionalParameters }}'
+
+ - ${{ if eq(parameters.enablePublishBuildArtifacts, 'true') }}:
+ - template: /eng/common/templates-official/steps/publish-logs.yml
+ parameters:
+ JobLabel: 'Publish_Artifacts_Logs'
diff --git a/eng/common/templates-official/job/source-build.yml b/eng/common/templates-official/job/source-build.yml
new file mode 100644
index 000000000000..50f04e642a35
--- /dev/null
+++ b/eng/common/templates-official/job/source-build.yml
@@ -0,0 +1,67 @@
+parameters:
+ # This template adds arcade-powered source-build to CI. The template produces a server job with a
+ # default ID 'Source_Build_Complete' to put in a dependency list if necessary.
+
+ # Specifies the prefix for source-build jobs added to pipeline. Use this if disambiguation needed.
+ jobNamePrefix: 'Source_Build'
+
+ # Defines the platform on which to run the job. By default, a linux-x64 machine, suitable for
+ # managed-only repositories. This is an object with these properties:
+ #
+ # name: ''
+ # The name of the job. This is included in the job ID.
+ # targetRID: ''
+ # The name of the target RID to use, instead of the one auto-detected by Arcade.
+ # nonPortable: false
+ # Enables non-portable mode. This means a more specific RID (e.g. fedora.32-x64 rather than
+ # linux-x64), and compiling against distro-provided packages rather than portable ones.
+ # skipPublishValidation: false
+ # Disables publishing validation. By default, a check is performed to ensure no packages are
+ # published by source-build.
+ # container: ''
+ # A container to use. Runs in docker.
+ # pool: {}
+ # A pool to use. Runs directly on an agent.
+ # buildScript: ''
+ # Specifies the build script to invoke to perform the build in the repo. The default
+ # './build.sh' should work for typical Arcade repositories, but this is customizable for
+ # difficult situations.
+ # jobProperties: {}
+ # A list of job properties to inject at the top level, for potential extensibility beyond
+ # container and pool.
+ platform: {}
+
+jobs:
+- job: ${{ parameters.jobNamePrefix }}_${{ parameters.platform.name }}
+ displayName: Source-Build (${{ parameters.platform.name }})
+
+ ${{ each property in parameters.platform.jobProperties }}:
+ ${{ property.key }}: ${{ property.value }}
+
+ ${{ if ne(parameters.platform.container, '') }}:
+ container: ${{ parameters.platform.container }}
+
+ ${{ if eq(parameters.platform.pool, '') }}:
+ # The default VM host AzDO pool. This should be capable of running Docker containers: almost all
+ # source-build builds run in Docker, including the default managed platform.
+ # /eng/common/templates-official/variables/pool-providers.yml can't be used here (some customers declare variables already), so duplicate its logic
+ pool:
+ ${{ if eq(variables['System.TeamProject'], 'public') }}:
+ name: $[replace(replace(eq(contains(coalesce(variables['System.PullRequest.TargetBranch'], variables['Build.SourceBranch'], 'refs/heads/main'), 'release'), 'true'), True, 'NetCore-Svc-Public' ), False, 'NetCore-Public')]
+ demands: ImageOverride -equals build.ubuntu.1804.amd64
+
+ ${{ if eq(variables['System.TeamProject'], 'internal') }}:
+ name: $[replace(replace(eq(contains(coalesce(variables['System.PullRequest.TargetBranch'], variables['Build.SourceBranch'], 'refs/heads/main'), 'release'), 'true'), True, 'NetCore1ESPool-Svc-Internal'), False, 'NetCore1ESPool-Internal')]
+ image: 1es-mariner-2-pt
+ os: linux
+
+ ${{ if ne(parameters.platform.pool, '') }}:
+ pool: ${{ parameters.platform.pool }}
+
+ workspace:
+ clean: all
+
+ steps:
+ - template: /eng/common/templates-official/steps/source-build.yml
+ parameters:
+ platform: ${{ parameters.platform }}
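
[Editor's note: a minimal sketch of a consumer of this job template, assuming a repo wires it up directly; the platform values below are illustrative, borrowed from the default managed platform elsewhere in this diff, not defaults of this file.]

    jobs:
    - template: /eng/common/templates-official/job/source-build.yml
      parameters:
        platform:
          name: 'Managed'
          container: 'mcr.microsoft.com/dotnet-buildtools/prereqs:centos-stream8'
          nonPortable: false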
diff --git a/eng/common/templates-official/job/source-index-stage1.yml b/eng/common/templates-official/job/source-index-stage1.yml
new file mode 100644
index 000000000000..757af7c7c4f2
--- /dev/null
+++ b/eng/common/templates-official/job/source-index-stage1.yml
@@ -0,0 +1,67 @@
+parameters:
+ runAsPublic: false
+ sourceIndexPackageVersion: 1.0.1-20231213.4
+ sourceIndexPackageSource: https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-tools/nuget/v3/index.json
+ sourceIndexBuildCommand: powershell -NoLogo -NoProfile -ExecutionPolicy Bypass -Command "eng/common/build.ps1 -restore -build -binarylog -ci"
+ preSteps: []
+ binlogPath: artifacts/log/Debug/Build.binlog
+ condition: ''
+ dependsOn: ''
+ pool: ''
+
+jobs:
+- job: SourceIndexStage1
+ dependsOn: ${{ parameters.dependsOn }}
+ condition: ${{ parameters.condition }}
+ variables:
+ - name: SourceIndexPackageVersion
+ value: ${{ parameters.sourceIndexPackageVersion }}
+ - name: SourceIndexPackageSource
+ value: ${{ parameters.sourceIndexPackageSource }}
+ - name: BinlogPath
+ value: ${{ parameters.binlogPath }}
+ - ${{ if and(eq(parameters.runAsPublic, 'false'), ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}:
+ - group: source-dot-net stage1 variables
+ - template: /eng/common/templates-official/variables/pool-providers.yml
+
+ ${{ if ne(parameters.pool, '') }}:
+ pool: ${{ parameters.pool }}
+ ${{ if eq(parameters.pool, '') }}:
+ pool:
+ ${{ if eq(variables['System.TeamProject'], 'public') }}:
+ name: $(DncEngPublicBuildPool)
+ image: windows.vs2022.amd64.open
+ ${{ if eq(variables['System.TeamProject'], 'internal') }}:
+ name: $(DncEngInternalBuildPool)
+ image: windows.vs2022.amd64
+
+ steps:
+ - ${{ each preStep in parameters.preSteps }}:
+ - ${{ preStep }}
+
+ - task: UseDotNet@2
+ displayName: Use .NET 8 SDK
+ inputs:
+ packageType: sdk
+ version: 8.0.x
+ installationPath: $(Agent.TempDirectory)/dotnet
+ workingDirectory: $(Agent.TempDirectory)
+
+ - script: |
+ $(Agent.TempDirectory)/dotnet/dotnet tool install BinLogToSln --version $(SourceIndexPackageVersion) --add-source $(SourceIndexPackageSource) --tool-path $(Agent.TempDirectory)/.source-index/tools
+ $(Agent.TempDirectory)/dotnet/dotnet tool install UploadIndexStage1 --version $(SourceIndexPackageVersion) --add-source $(SourceIndexPackageSource) --tool-path $(Agent.TempDirectory)/.source-index/tools
+ displayName: Download Tools
+ # Set the working directory to the temp directory so 'dotnet' doesn't pick up the repo's global.json and try to use the repo's SDK.
+ workingDirectory: $(Agent.TempDirectory)
+
+ - script: ${{ parameters.sourceIndexBuildCommand }}
+ displayName: Build Repository
+
+ - script: $(Agent.TempDirectory)/.source-index/tools/BinLogToSln -i $(BinlogPath) -r $(Build.SourcesDirectory) -n $(Build.Repository.Name) -o .source-index/stage1output
+ displayName: Process Binlog into indexable sln
+
+ - ${{ if and(eq(parameters.runAsPublic, 'false'), ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}:
+ - script: $(Agent.TempDirectory)/.source-index/tools/UploadIndexStage1 -i .source-index/stage1output -n $(Build.Repository.Name)
+ displayName: Upload stage1 artifacts to source index
+ env:
+ BLOB_CONTAINER_URL: $(source-dot-net-stage1-blob-container-url)
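
[Editor's note: as a sketch, a repo could opt into stage-1 source indexing by instantiating this job and overriding only what differs from the defaults above; the values shown simply mirror those defaults.]

    jobs:
    - template: /eng/common/templates-official/job/source-index-stage1.yml
      parameters:
        binlogPath: artifacts/log/Debug/Build.binlog
        sourceIndexBuildCommand: powershell -NoLogo -NoProfile -ExecutionPolicy Bypass -Command "eng/common/build.ps1 -restore -build -binarylog -ci"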
diff --git a/eng/common/templates-official/jobs/codeql-build.yml b/eng/common/templates-official/jobs/codeql-build.yml
new file mode 100644
index 000000000000..b68d3c2f3199
--- /dev/null
+++ b/eng/common/templates-official/jobs/codeql-build.yml
@@ -0,0 +1,31 @@
+parameters:
+ # See schema documentation in /Documentation/AzureDevOps/TemplateSchema.md
+ continueOnError: false
+ # Required: A collection of jobs to run - https://docs.microsoft.com/en-us/azure/devops/pipelines/yaml-schema?view=vsts&tabs=schema#job
+ jobs: []
+ # Optional: if specified, restore and use this version of Guardian instead of the default.
+ overrideGuardianVersion: ''
+
+jobs:
+- template: /eng/common/templates-official/jobs/jobs.yml
+ parameters:
+ enableMicrobuild: false
+ enablePublishBuildArtifacts: false
+ enablePublishTestResults: false
+ enablePublishBuildAssets: false
+ enablePublishUsingPipelines: false
+ enableTelemetry: true
+
+ variables:
+ - group: Publish-Build-Assets
+ # The Guardian version specified in 'eng/common/sdl/packages.config'. This value must be kept in
+ # sync with the packages.config file.
+ - name: DefaultGuardianVersion
+ value: 0.109.0
+ - name: GuardianPackagesConfigFile
+ value: $(Build.SourcesDirectory)\eng\common\sdl\packages.config
+ - name: GuardianVersion
+ value: ${{ coalesce(parameters.overrideGuardianVersion, '$(DefaultGuardianVersion)') }}
+
+ jobs: ${{ parameters.jobs }}
+
diff --git a/eng/common/templates-official/jobs/jobs.yml b/eng/common/templates-official/jobs/jobs.yml
new file mode 100644
index 000000000000..857a0f8ba43e
--- /dev/null
+++ b/eng/common/templates-official/jobs/jobs.yml
@@ -0,0 +1,97 @@
+parameters:
+ # See schema documentation in /Documentation/AzureDevOps/TemplateSchema.md
+ continueOnError: false
+
+ # Optional: Include PublishBuildArtifacts task
+ enablePublishBuildArtifacts: false
+
+ # Optional: Enable publishing using release pipelines
+ enablePublishUsingPipelines: false
+
+ # Optional: Enable running the source-build jobs to build repo from source
+ enableSourceBuild: false
+
+ # Optional: Parameters for source-build template.
+ # See /eng/common/templates-official/jobs/source-build.yml for options
+ sourceBuildParameters: []
+
+ graphFileGeneration:
+ # Optional: Enable generating the graph files at the end of the build
+ enabled: false
+ # Optional: Include toolset dependencies in the generated graph files
+ includeToolset: false
+
+ # Required: A collection of jobs to run - https://docs.microsoft.com/en-us/azure/devops/pipelines/yaml-schema?view=vsts&tabs=schema#job
+ jobs: []
+
+ # Optional: Override automatically derived dependsOn value for "publish build assets" job
+ publishBuildAssetsDependsOn: ''
+
+ # Optional: Publish the assets as soon as the publish to BAR stage is complete, rather than doing so in a separate stage.
+ publishAssetsImmediately: false
+
+ # Optional: If using publishAssetsImmediately and additional parameters are needed, can be used to send along additional parameters (normally sent to post-build.yml)
+ artifactsPublishingAdditionalParameters: ''
+ signingValidationAdditionalParameters: ''
+
+ # Optional: should run as a public build even in the internal project
+ # if 'true', the build won't run any of the internal only steps, even if it is running in non-public projects.
+ runAsPublic: false
+
+ enableSourceIndex: false
+ sourceIndexParams: {}
+
+# Internal resources (telemetry, microbuild) can only be accessed from non-public projects,
+# and some (Microbuild) should only be applied to non-PR cases for internal builds.
+
+jobs:
+- ${{ each job in parameters.jobs }}:
+ - template: ../job/job.yml
+ parameters:
+ # pass along parameters
+ ${{ each parameter in parameters }}:
+ ${{ if ne(parameter.key, 'jobs') }}:
+ ${{ parameter.key }}: ${{ parameter.value }}
+
+ # pass along job properties
+ ${{ each property in job }}:
+ ${{ if ne(property.key, 'job') }}:
+ ${{ property.key }}: ${{ property.value }}
+
+ name: ${{ job.job }}
+
+- ${{ if eq(parameters.enableSourceBuild, true) }}:
+ - template: /eng/common/templates-official/jobs/source-build.yml
+ parameters:
+ allCompletedJobId: Source_Build_Complete
+ ${{ each parameter in parameters.sourceBuildParameters }}:
+ ${{ parameter.key }}: ${{ parameter.value }}
+
+- ${{ if eq(parameters.enableSourceIndex, 'true') }}:
+ - template: ../job/source-index-stage1.yml
+ parameters:
+ runAsPublic: ${{ parameters.runAsPublic }}
+ ${{ each parameter in parameters.sourceIndexParams }}:
+ ${{ parameter.key }}: ${{ parameter.value }}
+
+- ${{ if and(eq(parameters.runAsPublic, 'false'), ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}:
+ - ${{ if or(eq(parameters.enablePublishBuildAssets, true), eq(parameters.artifacts.publish.manifests, 'true'), ne(parameters.artifacts.publish.manifests, '')) }}:
+ - template: ../job/publish-build-assets.yml
+ parameters:
+ continueOnError: ${{ parameters.continueOnError }}
+ dependsOn:
+ - ${{ if ne(parameters.publishBuildAssetsDependsOn, '') }}:
+ - ${{ each job in parameters.publishBuildAssetsDependsOn }}:
+ - ${{ job.job }}
+ - ${{ if eq(parameters.publishBuildAssetsDependsOn, '') }}:
+ - ${{ each job in parameters.jobs }}:
+ - ${{ job.job }}
+ - ${{ if eq(parameters.enableSourceBuild, true) }}:
+ - Source_Build_Complete
+
+ runAsPublic: ${{ parameters.runAsPublic }}
+ publishUsingPipelines: ${{ parameters.enablePublishUsingPipelines }}
+ publishAssetsImmediately: ${{ parameters.publishAssetsImmediately }}
+ enablePublishBuildArtifacts: ${{ parameters.enablePublishBuildArtifacts }}
+ artifactsPublishingAdditionalParameters: ${{ parameters.artifactsPublishingAdditionalParameters }}
+ signingValidationAdditionalParameters: ${{ parameters.signingValidationAdditionalParameters }}
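
[Editor's note: a hedged sketch of how a repo-level pipeline might instantiate this wrapper; the inner job, pool, and build script are assumptions for illustration (cibuild.cmd is a typical Arcade entry point, not something defined in this diff).]

    jobs:
    - template: /eng/common/templates-official/jobs/jobs.yml
      parameters:
        enablePublishBuildArtifacts: true
        enableSourceBuild: true
        jobs:
        - job: Build_Windows
          pool:
            name: NetCore1ESPool-Internal
            image: 1es-windows-2022-pt
            os: windows
          steps:
          - script: eng\common\cibuild.cmd -configuration Release -prepareMachine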
diff --git a/eng/common/templates-official/jobs/source-build.yml b/eng/common/templates-official/jobs/source-build.yml
new file mode 100644
index 000000000000..08e5db9bb116
--- /dev/null
+++ b/eng/common/templates-official/jobs/source-build.yml
@@ -0,0 +1,46 @@
+parameters:
+ # This template adds arcade-powered source-build to CI. A job is created for each platform, as
+ # well as an optional server job that completes when all platform jobs complete.
+
+ # The name of the "join" job for all source-build platforms. If set to empty string, the job is
+ # not included. Existing repo pipelines can use this job to depend on all source-build jobs
+ # completing without maintaining a separate list of every single job ID: just depend on this one
+ # server job. By default, not included. Recommended name if used: 'Source_Build_Complete'.
+ allCompletedJobId: ''
+
+ # See /eng/common/templates-official/job/source-build.yml
+ jobNamePrefix: 'Source_Build'
+
+ # This is the default platform provided by Arcade, intended for use by a managed-only repo.
+ defaultManagedPlatform:
+ name: 'Managed'
+ container: 'mcr.microsoft.com/dotnet-buildtools/prereqs:centos-stream8'
+
+ # Defines the platforms on which to run build jobs. One job is created for each platform, and the
+ # object in this array is sent to the job template as 'platform'. If no platforms are specified,
+ # one job runs on 'defaultManagedPlatform'.
+ platforms: []
+
+jobs:
+
+- ${{ if ne(parameters.allCompletedJobId, '') }}:
+ - job: ${{ parameters.allCompletedJobId }}
+ displayName: Source-Build Complete
+ pool: server
+ dependsOn:
+ - ${{ each platform in parameters.platforms }}:
+ - ${{ parameters.jobNamePrefix }}_${{ platform.name }}
+ - ${{ if eq(length(parameters.platforms), 0) }}:
+ - ${{ parameters.jobNamePrefix }}_${{ parameters.defaultManagedPlatform.name }}
+
+- ${{ each platform in parameters.platforms }}:
+ - template: /eng/common/templates-official/job/source-build.yml
+ parameters:
+ jobNamePrefix: ${{ parameters.jobNamePrefix }}
+ platform: ${{ platform }}
+
+- ${{ if eq(length(parameters.platforms), 0) }}:
+ - template: /eng/common/templates-official/job/source-build.yml
+ parameters:
+ jobNamePrefix: ${{ parameters.jobNamePrefix }}
+ platform: ${{ parameters.defaultManagedPlatform }}
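
[Editor's note: the join job lets a single dependsOn entry cover every source-build platform, as in this sketch; the downstream job is hypothetical.]

    jobs:
    - template: /eng/common/templates-official/jobs/source-build.yml
      parameters:
        allCompletedJobId: Source_Build_Complete
    - job: After_Source_Build
      dependsOn: Source_Build_Complete
      steps:
      - script: echo all source-build jobs finished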
diff --git a/eng/common/templates-official/post-build/common-variables.yml b/eng/common/templates-official/post-build/common-variables.yml
new file mode 100644
index 000000000000..b9ede10bf099
--- /dev/null
+++ b/eng/common/templates-official/post-build/common-variables.yml
@@ -0,0 +1,24 @@
+variables:
+ - group: Publish-Build-Assets
+
+ # Whether the build is internal or not
+ - name: IsInternalBuild
+ value: ${{ and(ne(variables['System.TeamProject'], 'public'), contains(variables['Build.SourceBranch'], 'internal')) }}
+
+ # Default Maestro++ API Endpoint and API Version
+ - name: MaestroApiEndPoint
+ value: "https://maestro.dot.net"
+ - name: MaestroApiAccessToken
+ value: $(MaestroAccessToken)
+ - name: MaestroApiVersion
+ value: "2020-02-20"
+
+ - name: SourceLinkCLIVersion
+ value: 3.0.0
+ - name: SymbolToolVersion
+ value: 1.0.1
+ - name: BinlogToolVersion
+ value: 1.0.11
+
+ - name: runCodesignValidationInjection
+ value: false
diff --git a/eng/common/templates-official/post-build/post-build.yml b/eng/common/templates-official/post-build/post-build.yml
new file mode 100644
index 000000000000..5c98fe1c0f3a
--- /dev/null
+++ b/eng/common/templates-official/post-build/post-build.yml
@@ -0,0 +1,285 @@
+parameters:
+ # Which publishing infra should be used. THIS SHOULD MATCH THE VERSION ON THE BUILD MANIFEST.
+ # Publishing V1 is no longer supported
+ # Publishing V2 is no longer supported
+ # Publishing V3 is the default
+ - name: publishingInfraVersion
+ displayName: Which version of publishing should be used to promote the build definition?
+ type: number
+ default: 3
+ values:
+ - 3
+
+ - name: BARBuildId
+ displayName: BAR Build Id
+ type: number
+ default: 0
+
+ - name: PromoteToChannelIds
+ displayName: Channel to promote BARBuildId to
+ type: string
+ default: ''
+
+ - name: enableSourceLinkValidation
+ displayName: Enable SourceLink validation
+ type: boolean
+ default: false
+
+ - name: enableSigningValidation
+ displayName: Enable signing validation
+ type: boolean
+ default: true
+
+ - name: enableSymbolValidation
+ displayName: Enable symbol validation
+ type: boolean
+ default: false
+
+ - name: enableNugetValidation
+ displayName: Enable NuGet validation
+ type: boolean
+ default: true
+
+ - name: publishInstallersAndChecksums
+ displayName: Publish installers and checksums
+ type: boolean
+ default: true
+
+ - name: SDLValidationParameters
+ type: object
+ default:
+ enable: false
+ publishGdn: false
+ continueOnError: false
+ params: ''
+ artifactNames: ''
+ downloadArtifacts: true
+
+ # These parameters let the user customize the call to sdk-task.ps1 for publishing
+ # symbols & general artifacts as well as for signing validation
+ - name: symbolPublishingAdditionalParameters
+ displayName: Symbol publishing additional parameters
+ type: string
+ default: ''
+
+ - name: artifactsPublishingAdditionalParameters
+ displayName: Artifact publishing additional parameters
+ type: string
+ default: ''
+
+ - name: signingValidationAdditionalParameters
+ displayName: Signing validation additional parameters
+ type: string
+ default: ''
+
+ # Which stages should finish execution before post-build stages start
+ - name: validateDependsOn
+ type: object
+ default:
+ - build
+
+ - name: publishDependsOn
+ type: object
+ default:
+ - Validate
+
+ # Optional: Call asset publishing rather than running in a separate stage
+ - name: publishAssetsImmediately
+ type: boolean
+ default: false
+
+stages:
+- ${{ if or(eq( parameters.enableNugetValidation, 'true'), eq(parameters.enableSigningValidation, 'true'), eq(parameters.enableSourceLinkValidation, 'true'), eq(parameters.SDLValidationParameters.enable, 'true')) }}:
+ - stage: Validate
+ dependsOn: ${{ parameters.validateDependsOn }}
+ displayName: Validate Build Assets
+ variables:
+ - template: common-variables.yml
+ - template: /eng/common/templates-official/variables/pool-providers.yml
+ jobs:
+ - job:
+ displayName: NuGet Validation
+ condition: and(succeededOrFailed(), eq( ${{ parameters.enableNugetValidation }}, 'true'))
+ pool:
+ # We don't use the collection uri here because it might vary (.visualstudio.com vs. dev.azure.com)
+ ${{ if eq(variables['System.TeamProject'], 'DevDiv') }}:
+ name: AzurePipelines-EO
+ image: 1ESPT-Windows2022
+ demands: Cmd
+ os: windows
+ # If it's not devdiv, it's dnceng
+ ${{ else }}:
+ name: $(DncEngInternalBuildPool)
+ image: 1es-windows-2022-pt
+ os: windows
+
+ steps:
+ - template: setup-maestro-vars.yml
+ parameters:
+ BARBuildId: ${{ parameters.BARBuildId }}
+ PromoteToChannelIds: ${{ parameters.PromoteToChannelIds }}
+
+ - task: DownloadBuildArtifacts@0
+ displayName: Download Package Artifacts
+ inputs:
+ buildType: specific
+ buildVersionToDownload: specific
+ project: $(AzDOProjectName)
+ pipeline: $(AzDOPipelineId)
+ buildId: $(AzDOBuildId)
+ artifactName: PackageArtifacts
+ checkDownloadedFiles: true
+
+ - task: PowerShell@2
+ displayName: Validate
+ inputs:
+ filePath: $(Build.SourcesDirectory)/eng/common/post-build/nuget-validation.ps1
+ arguments: -PackagesPath $(Build.ArtifactStagingDirectory)/PackageArtifacts/
+ -ToolDestinationPath $(Agent.BuildDirectory)/Extract/
+
+ - job:
+ displayName: Signing Validation
+ condition: and( eq( ${{ parameters.enableSigningValidation }}, 'true'), ne( variables['PostBuildSign'], 'true'))
+ pool:
+ # We don't use the collection uri here because it might vary (.visualstudio.com vs. dev.azure.com)
+ ${{ if eq(variables['System.TeamProject'], 'DevDiv') }}:
+ name: AzurePipelines-EO
+ image: 1ESPT-Windows2022
+ demands: Cmd
+ os: windows
+ # If it's not devdiv, it's dnceng
+ ${{ else }}:
+ name: $(DncEngInternalBuildPool)
+ image: 1es-windows-2022-pt
+ os: windows
+ steps:
+ - template: setup-maestro-vars.yml
+ parameters:
+ BARBuildId: ${{ parameters.BARBuildId }}
+ PromoteToChannelIds: ${{ parameters.PromoteToChannelIds }}
+
+ - task: DownloadBuildArtifacts@0
+ displayName: Download Package Artifacts
+ inputs:
+ buildType: specific
+ buildVersionToDownload: specific
+ project: $(AzDOProjectName)
+ pipeline: $(AzDOPipelineId)
+ buildId: $(AzDOBuildId)
+ artifactName: PackageArtifacts
+ checkDownloadedFiles: true
+ itemPattern: |
+ **
+ !**/Microsoft.SourceBuild.Intermediate.*.nupkg
+
+ # This is necessary whenever we want to publish/restore to an AzDO private feed
+ # Since sdk-task.ps1 tries to restore packages, we need to authenticate here;
+ # otherwise it'll complain about accessing a private feed.
+ - task: NuGetAuthenticate@1
+ displayName: 'Authenticate to AzDO Feeds'
+
+ # Signing validation will optionally work with the buildmanifest file which is downloaded from
+ # Azure DevOps above.
+ - task: PowerShell@2
+ displayName: Validate
+ inputs:
+ filePath: eng\common\sdk-task.ps1
+ arguments: -task SigningValidation -restore -msbuildEngine vs
+ /p:PackageBasePath='$(Build.ArtifactStagingDirectory)/PackageArtifacts'
+ /p:SignCheckExclusionsFile='$(Build.SourcesDirectory)/eng/SignCheckExclusionsFile.txt'
+ ${{ parameters.signingValidationAdditionalParameters }}
+
+ - template: ../steps/publish-logs.yml
+ parameters:
+ StageLabel: 'Validation'
+ JobLabel: 'Signing'
+ BinlogToolVersion: $(BinlogToolVersion)
+
+ - job:
+ displayName: SourceLink Validation
+ condition: eq( ${{ parameters.enableSourceLinkValidation }}, 'true')
+ pool:
+ # We don't use the collection uri here because it might vary (.visualstudio.com vs. dev.azure.com)
+ ${{ if eq(variables['System.TeamProject'], 'DevDiv') }}:
+ name: AzurePipelines-EO
+ image: 1ESPT-Windows2022
+ demands: Cmd
+ os: windows
+ # If it's not devdiv, it's dnceng
+ ${{ else }}:
+ name: $(DncEngInternalBuildPool)
+ image: 1es-windows-2022-pt
+ os: windows
+ steps:
+ - template: setup-maestro-vars.yml
+ parameters:
+ BARBuildId: ${{ parameters.BARBuildId }}
+ PromoteToChannelIds: ${{ parameters.PromoteToChannelIds }}
+
+ - task: DownloadBuildArtifacts@0
+ displayName: Download Blob Artifacts
+ inputs:
+ buildType: specific
+ buildVersionToDownload: specific
+ project: $(AzDOProjectName)
+ pipeline: $(AzDOPipelineId)
+ buildId: $(AzDOBuildId)
+ artifactName: BlobArtifacts
+ checkDownloadedFiles: true
+
+ - task: PowerShell@2
+ displayName: Validate
+ inputs:
+ filePath: $(Build.SourcesDirectory)/eng/common/post-build/sourcelink-validation.ps1
+ arguments: -InputPath $(Build.ArtifactStagingDirectory)/BlobArtifacts/
+ -ExtractPath $(Agent.BuildDirectory)/Extract/
+ -GHRepoName $(Build.Repository.Name)
+ -GHCommit $(Build.SourceVersion)
+ -SourcelinkCliVersion $(SourceLinkCLIVersion)
+ continueOnError: true
+
+- ${{ if ne(parameters.publishAssetsImmediately, 'true') }}:
+ - stage: publish_using_darc
+ ${{ if or(eq(parameters.enableNugetValidation, 'true'), eq(parameters.enableSigningValidation, 'true'), eq(parameters.enableSourceLinkValidation, 'true'), eq(parameters.SDLValidationParameters.enable, 'true')) }}:
+ dependsOn: ${{ parameters.publishDependsOn }}
+ ${{ else }}:
+ dependsOn: ${{ parameters.validateDependsOn }}
+ displayName: Publish using Darc
+ variables:
+ - template: common-variables.yml
+ - template: /eng/common/templates-official/variables/pool-providers.yml
+ jobs:
+ - job:
+ displayName: Publish Using Darc
+ timeoutInMinutes: 120
+ pool:
+ # We don't use the collection uri here because it might vary (.visualstudio.com vs. dev.azure.com)
+ ${{ if eq(variables['System.TeamProject'], 'DevDiv') }}:
+ name: AzurePipelines-EO
+ image: 1ESPT-Windows2022
+ demands: Cmd
+ os: windows
+ # If it's not devdiv, it's dnceng
+ ${{ else }}:
+ name: $(DncEngInternalBuildPool)
+ image: 1es-windows-2022-pt
+ os: windows
+ steps:
+ - template: setup-maestro-vars.yml
+ parameters:
+ BARBuildId: ${{ parameters.BARBuildId }}
+ PromoteToChannelIds: ${{ parameters.PromoteToChannelIds }}
+
+ - task: NuGetAuthenticate@1
+
+ - task: PowerShell@2
+ displayName: Publish Using Darc
+ inputs:
+ filePath: $(Build.SourcesDirectory)/eng/common/post-build/publish-using-darc.ps1
+ arguments: -BuildId $(BARBuildId)
+ -PublishingInfraVersion ${{ parameters.publishingInfraVersion }}
+ -AzdoToken '$(publishing-dnceng-devdiv-code-r-build-re)'
+ -MaestroToken '$(MaestroApiAccessToken)'
+ -WaitPublishingFinish true
+ -ArtifactsPublishingAdditionalParameters '${{ parameters.artifactsPublishingAdditionalParameters }}'
+ -SymbolPublishingAdditionalParameters '${{ parameters.symbolPublishingAdditionalParameters }}'
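
[Editor's note: for orientation, a repo's official pipeline would typically append these stages after its build stage, roughly as below; the build stage and parameter choices are illustrative only.]

    stages:
    - stage: build
      jobs:
      - job: Build
        steps:
        - script: echo build
    - template: /eng/common/templates-official/post-build/post-build.yml
      parameters:
        publishingInfraVersion: 3
        enableSourceLinkValidation: false
        publishAssetsImmediately: false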
diff --git a/eng/common/templates-official/post-build/setup-maestro-vars.yml b/eng/common/templates-official/post-build/setup-maestro-vars.yml
new file mode 100644
index 000000000000..0c87f149a4ad
--- /dev/null
+++ b/eng/common/templates-official/post-build/setup-maestro-vars.yml
@@ -0,0 +1,70 @@
+parameters:
+ BARBuildId: ''
+ PromoteToChannelIds: ''
+
+steps:
+ - ${{ if eq(coalesce(parameters.PromoteToChannelIds, 0), 0) }}:
+ - task: DownloadBuildArtifacts@0
+ displayName: Download Release Configs
+ inputs:
+ buildType: current
+ artifactName: ReleaseConfigs
+ checkDownloadedFiles: true
+
+ - task: PowerShell@2
+ name: setReleaseVars
+ displayName: Set Release Configs Vars
+ inputs:
+ targetType: inline
+ pwsh: true
+ script: |
+ try {
+ if (!$Env:PromoteToMaestroChannels -or $Env:PromoteToMaestroChannels.Trim() -eq '') {
+ $Content = Get-Content $(Build.StagingDirectory)/ReleaseConfigs/ReleaseConfigs.txt
+
+ $BarId = $Content | Select -Index 0
+ $Channels = $Content | Select -Index 1
+ $IsStableBuild = $Content | Select -Index 2
+
+ $AzureDevOpsProject = $Env:System_TeamProject
+ $AzureDevOpsBuildDefinitionId = $Env:System_DefinitionId
+ $AzureDevOpsBuildId = $Env:Build_BuildId
+ }
+ else {
+ $buildApiEndpoint = "${Env:MaestroApiEndPoint}/api/builds/${Env:BARBuildId}?api-version=${Env:MaestroApiVersion}"
+
+ $apiHeaders = New-Object 'System.Collections.Generic.Dictionary[[String],[String]]'
+ $apiHeaders.Add('Accept', 'application/json')
+ $apiHeaders.Add('Authorization',"Bearer ${Env:MAESTRO_API_TOKEN}")
+
+ $buildInfo = try { Invoke-WebRequest -Method Get -Uri $buildApiEndpoint -Headers $apiHeaders | ConvertFrom-Json } catch { Write-Host "Error: $_" }
+
+ $BarId = $Env:BARBuildId
+ $Channels = $Env:PromoteToMaestroChannels -split ","
+ $Channels = $Channels -join "]["
+ $Channels = "[$Channels]"
+
+ $IsStableBuild = $buildInfo.stable
+ $AzureDevOpsProject = $buildInfo.azureDevOpsProject
+ $AzureDevOpsBuildDefinitionId = $buildInfo.azureDevOpsBuildDefinitionId
+ $AzureDevOpsBuildId = $buildInfo.azureDevOpsBuildId
+ }
+
+ Write-Host "##vso[task.setvariable variable=BARBuildId]$BarId"
+ Write-Host "##vso[task.setvariable variable=TargetChannels]$Channels"
+ Write-Host "##vso[task.setvariable variable=IsStableBuild]$IsStableBuild"
+
+ Write-Host "##vso[task.setvariable variable=AzDOProjectName]$AzureDevOpsProject"
+ Write-Host "##vso[task.setvariable variable=AzDOPipelineId]$AzureDevOpsBuildDefinitionId"
+ Write-Host "##vso[task.setvariable variable=AzDOBuildId]$AzureDevOpsBuildId"
+ }
+ catch {
+ Write-Host $_
+ Write-Host $_.Exception
+ Write-Host $_.ScriptStackTrace
+ exit 1
+ }
+ env:
+ MAESTRO_API_TOKEN: $(MaestroApiAccessToken)
+ BARBuildId: ${{ parameters.BARBuildId }}
+ PromoteToMaestroChannels: ${{ parameters.PromoteToChannelIds }}
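
[Editor's note: when PromoteToMaestroChannels is empty, the script above reads ReleaseConfigs.txt by line index, so the artifact is assumed to hold exactly three lines: the BAR build id, the channel list, and the stable-build flag. An invented sample, for illustration only:]

    123456
    [1296]
    False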
diff --git a/eng/common/templates-official/post-build/trigger-subscription.yml b/eng/common/templates-official/post-build/trigger-subscription.yml
new file mode 100644
index 000000000000..da669030daf6
--- /dev/null
+++ b/eng/common/templates-official/post-build/trigger-subscription.yml
@@ -0,0 +1,13 @@
+parameters:
+ ChannelId: 0
+
+steps:
+- task: PowerShell@2
+ displayName: Triggering subscriptions
+ inputs:
+ filePath: $(Build.SourcesDirectory)/eng/common/post-build/trigger-subscriptions.ps1
+ arguments: -SourceRepo $(Build.Repository.Uri)
+ -ChannelId ${{ parameters.ChannelId }}
+ -MaestroApiAccessToken $(MaestroAccessToken)
+ -MaestroApiEndPoint $(MaestroApiEndPoint)
+ -MaestroApiVersion $(MaestroApiVersion)
diff --git a/eng/common/templates-official/steps/add-build-to-channel.yml b/eng/common/templates-official/steps/add-build-to-channel.yml
new file mode 100644
index 000000000000..f67a210d62f3
--- /dev/null
+++ b/eng/common/templates-official/steps/add-build-to-channel.yml
@@ -0,0 +1,13 @@
+parameters:
+ ChannelId: 0
+
+steps:
+- task: PowerShell@2
+ displayName: Add Build to Channel
+ inputs:
+ filePath: $(Build.SourcesDirectory)/eng/common/post-build/add-build-to-channel.ps1
+ arguments: -BuildId $(BARBuildId)
+ -ChannelId ${{ parameters.ChannelId }}
+ -MaestroApiAccessToken $(MaestroApiAccessToken)
+ -MaestroApiEndPoint $(MaestroApiEndPoint)
+ -MaestroApiVersion $(MaestroApiVersion)
diff --git a/eng/common/templates-official/steps/component-governance.yml b/eng/common/templates-official/steps/component-governance.yml
new file mode 100644
index 000000000000..0ecec47b0c91
--- /dev/null
+++ b/eng/common/templates-official/steps/component-governance.yml
@@ -0,0 +1,13 @@
+parameters:
+ disableComponentGovernance: false
+ componentGovernanceIgnoreDirectories: ''
+
+steps:
+- ${{ if eq(parameters.disableComponentGovernance, 'true') }}:
+ - script: "echo ##vso[task.setvariable variable=skipComponentGovernanceDetection]true"
+ displayName: Set skipComponentGovernanceDetection variable
+- ${{ if ne(parameters.disableComponentGovernance, 'true') }}:
+ - task: ComponentGovernanceComponentDetection@0
+ continueOnError: true
+ inputs:
+ ignoreDirectories: ${{ parameters.componentGovernanceIgnoreDirectories }} \ No newline at end of file
diff --git a/eng/common/templates-official/steps/generate-sbom.yml b/eng/common/templates-official/steps/generate-sbom.yml
new file mode 100644
index 000000000000..488b560e8ba4
--- /dev/null
+++ b/eng/common/templates-official/steps/generate-sbom.yml
@@ -0,0 +1,48 @@
+# BuildDropPath - The root folder of the drop directory for which the manifest file will be generated.
+# PackageName - The name of the package this SBOM represents.
+# PackageVersion - The version of the package this SBOM represents.
+# ManifestDirPath - The path of the directory where the generated manifest files will be placed
+# IgnoreDirectories - Directories to ignore for SBOM generation. This will be passed through to the CG component detector.
+
+parameters:
+ PackageVersion: 7.0.0
+ BuildDropPath: '$(Build.SourcesDirectory)/artifacts'
+ PackageName: '.NET'
+ ManifestDirPath: $(Build.ArtifactStagingDirectory)/sbom
+ IgnoreDirectories: ''
+ sbomContinueOnError: true
+
+steps:
+- task: PowerShell@2
+ displayName: Prep for SBOM generation (non-Linux)
+ condition: or(eq(variables['Agent.Os'], 'Windows_NT'), eq(variables['Agent.Os'], 'Darwin'))
+ inputs:
+ filePath: ./eng/common/generate-sbom-prep.ps1
+ arguments: ${{parameters.manifestDirPath}}
+
+# Chmodding is a workaround for https://github.com/dotnet/arcade/issues/8461
+- script: |
+ chmod +x ./eng/common/generate-sbom-prep.sh
+ ./eng/common/generate-sbom-prep.sh ${{parameters.manifestDirPath}}
+ displayName: Prep for SBOM generation (Linux)
+ condition: eq(variables['Agent.Os'], 'Linux')
+ continueOnError: ${{ parameters.sbomContinueOnError }}
+
+- task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0
+ displayName: 'Generate SBOM manifest'
+ continueOnError: ${{ parameters.sbomContinueOnError }}
+ inputs:
+ PackageName: ${{ parameters.packageName }}
+ BuildDropPath: ${{ parameters.buildDropPath }}
+ PackageVersion: ${{ parameters.packageVersion }}
+ ManifestDirPath: ${{ parameters.manifestDirPath }}
+ ${{ if ne(parameters.IgnoreDirectories, '') }}:
+ AdditionalComponentDetectorArgs: '--IgnoreDirectories ${{ parameters.IgnoreDirectories }}'
+
+- task: 1ES.PublishPipelineArtifact@1
+ displayName: Publish SBOM manifest
+ continueOnError: ${{parameters.sbomContinueOnError}}
+ inputs:
+ targetPath: '${{parameters.manifestDirPath}}'
+ artifactName: $(ARTIFACT_NAME)
+
diff --git a/eng/common/templates-official/steps/publish-logs.yml b/eng/common/templates-official/steps/publish-logs.yml
new file mode 100644
index 000000000000..84b2f559c56e
--- /dev/null
+++ b/eng/common/templates-official/steps/publish-logs.yml
@@ -0,0 +1,49 @@
+parameters:
+ StageLabel: ''
+ JobLabel: ''
+ CustomSensitiveDataList: ''
+ # A default - in case value from eng/common/templates-official/post-build/common-variables.yml is not passed
+ BinlogToolVersion: '1.0.11'
+
+steps:
+- task: PowerShell@2
+ displayName: Prepare Binlogs to Upload
+ inputs:
+ targetType: inline
+ script: |
+ New-Item -ItemType Directory $(Build.SourcesDirectory)/PostBuildLogs/${{parameters.StageLabel}}/${{parameters.JobLabel}}/
+ Move-Item -Path $(Build.SourcesDirectory)/artifacts/log/Debug/* $(Build.SourcesDirectory)/PostBuildLogs/${{parameters.StageLabel}}/${{parameters.JobLabel}}/
+ continueOnError: true
+ condition: always()
+
+- task: PowerShell@2
+ displayName: Redact Logs
+ inputs:
+ filePath: $(Build.SourcesDirectory)/eng/common/post-build/redact-logs.ps1
+ # For now this needs to have explicit list of all sensitive data. Taken from eng/publishing/v3/publish.yml
+ # Sensitive data can also be added to $(Build.SourcesDirectory)/eng/BinlogSecretsRedactionFile.txt
+ # If the file exists - sensitive data for redaction will be sourced from it
+ # (single entry per line, lines starting with '# ' are considered comments and skipped)
+ arguments: -InputPath '$(Build.SourcesDirectory)/PostBuildLogs'
+ -BinlogToolVersion ${{parameters.BinlogToolVersion}}
+ -TokensFilePath '$(Build.SourcesDirectory)/eng/BinlogSecretsRedactionFile.txt'
+ '$(publishing-dnceng-devdiv-code-r-build-re)'
+ '$(MaestroAccessToken)'
+ '$(dn-bot-all-orgs-artifact-feeds-rw)'
+ '$(akams-client-id)'
+ '$(akams-client-secret)'
+ '$(microsoft-symbol-server-pat)'
+ '$(symweb-symbol-server-pat)'
+ '$(dn-bot-all-orgs-build-rw-code-rw)'
+ ${{parameters.CustomSensitiveDataList}}
+ continueOnError: true
+ condition: always()
+
+- task: 1ES.PublishBuildArtifacts@1
+ displayName: Publish Logs
+ inputs:
+ PathtoPublish: '$(Build.SourcesDirectory)/PostBuildLogs'
+ PublishLocation: Container
+ ArtifactName: PostBuildLogs
+ continueOnError: true
+ condition: always()
diff --git a/eng/common/templates-official/steps/retain-build.yml b/eng/common/templates-official/steps/retain-build.yml
new file mode 100644
index 000000000000..83d97a26a01f
--- /dev/null
+++ b/eng/common/templates-official/steps/retain-build.yml
@@ -0,0 +1,28 @@
+parameters:
+ # Optional azure devops PAT with build execute permissions for the build's organization,
+ # only needed if the build that should be retained ran on a different organization than
+ # the pipeline where this template is executing from
+ Token: ''
+ # Optional BuildId to retain, defaults to the current running build
+ BuildId: ''
+ # Azure devops Organization URI for the build in the https://dev.azure.com/<organization> format.
+ # Defaults to the organization the current pipeline is running on
+ AzdoOrgUri: '$(System.CollectionUri)'
+ # Azure devops project for the build. Defaults to the project the current pipeline is running on
+ AzdoProject: '$(System.TeamProject)'
+
+steps:
+ - task: PowerShell@2
+ inputs:
+ targetType: 'filePath'
+ filePath: eng/common/retain-build.ps1
+ pwsh: true
+ arguments: >
+ -AzdoOrgUri ${{parameters.AzdoOrgUri}}
+ -AzdoProject ${{parameters.AzdoProject}}
+ -Token ${{coalesce(parameters.Token, '$env:SYSTEM_ACCESSTOKEN') }}
+ -BuildId ${{coalesce(parameters.BuildId, '$env:BUILD_ID')}}
+ displayName: Enable permanent build retention
+ env:
+ SYSTEM_ACCESSTOKEN: $(System.AccessToken)
+ BUILD_ID: $(Build.BuildId) \ No newline at end of file
diff --git a/eng/common/templates-official/steps/send-to-helix.yml b/eng/common/templates-official/steps/send-to-helix.yml
new file mode 100644
index 000000000000..3eb7e2d5f840
--- /dev/null
+++ b/eng/common/templates-official/steps/send-to-helix.yml
@@ -0,0 +1,91 @@
+# Please remember to update the documentation if you make changes to these parameters!
+parameters:
+ HelixSource: 'pr/default' # required -- sources must start with pr/, official/, prodcon/, or agent/
+ HelixType: 'tests/default/' # required -- Helix telemetry which identifies what type of data this is; should include "test" for clarity and must end in '/'
+ HelixBuild: $(Build.BuildNumber) # required -- the build number Helix will use to identify this -- automatically set to the AzDO build number
+ HelixTargetQueues: '' # required -- semicolon-delimited list of Helix queues to test on; see https://helix.dot.net/ for a list of queues
+ HelixAccessToken: '' # required -- access token to make Helix API requests; should be provided by the appropriate variable group
+ HelixConfiguration: '' # optional -- additional property attached to a job
+ HelixPreCommands: '' # optional -- commands to run before Helix work item execution
+ HelixPostCommands: '' # optional -- commands to run after Helix work item execution
+ WorkItemDirectory: '' # optional -- a payload directory to zip up and send to Helix; requires WorkItemCommand; incompatible with XUnitProjects
+ WorkItemCommand: '' # optional -- a command to execute on the payload; requires WorkItemDirectory; incompatible with XUnitProjects
+ WorkItemTimeout: '' # optional -- a timeout in TimeSpan.Parse-ready value (e.g. 00:02:00) for the work item command; requires WorkItemDirectory; incompatible with XUnitProjects
+ CorrelationPayloadDirectory: '' # optional -- a directory to zip up and send to Helix as a correlation payload
+ XUnitProjects: '' # optional -- semicolon-delimited list of XUnitProjects to parse and send to Helix; requires XUnitRuntimeTargetFramework, XUnitPublishTargetFramework, XUnitRunnerVersion, and IncludeDotNetCli=true
+ XUnitWorkItemTimeout: '' # optional -- the workitem timeout in seconds for all workitems created from the xUnit projects specified by XUnitProjects
+ XUnitPublishTargetFramework: '' # optional -- framework to use to publish your xUnit projects
+ XUnitRuntimeTargetFramework: '' # optional -- framework to use for the xUnit console runner
+ XUnitRunnerVersion: '' # optional -- version of the xUnit nuget package you wish to use on Helix; required for XUnitProjects
+ IncludeDotNetCli: false # optional -- true will download a version of the .NET CLI onto the Helix machine as a correlation payload; requires DotNetCliPackageType and DotNetCliVersion
+ DotNetCliPackageType: '' # optional -- either 'sdk', 'runtime' or 'aspnetcore-runtime'; determines whether the sdk or runtime will be sent to Helix; see https://raw.githubusercontent.com/dotnet/core/main/release-notes/releases-index.json
+ DotNetCliVersion: '' # optional -- version of the CLI to send to Helix; based on this: https://raw.githubusercontent.com/dotnet/core/main/release-notes/releases-index.json
+ WaitForWorkItemCompletion: true # optional -- true will make the task wait until work items have been completed and fail the build if work items fail. False is "fire and forget."
+ IsExternal: false # [DEPRECATED] -- doesn't do anything, jobs are external if HelixAccessToken is empty and Creator is set
+ HelixBaseUri: 'https://helix.dot.net/' # optional -- sets the Helix API base URI (allows targeting https://helix.int-dot.net )
+ Creator: '' # optional -- if the build is external, use this to specify who is sending the job
+ DisplayNamePrefix: 'Run Tests' # optional -- rename the beginning of the displayName of the steps in AzDO
+ condition: succeeded() # optional -- condition for step to execute; defaults to succeeded()
+ continueOnError: false # optional -- determines whether to continue the build if the step errors; defaults to false
+
+steps:
+ - powershell: 'powershell "$env:BUILD_SOURCESDIRECTORY\eng\common\msbuild.ps1 $env:BUILD_SOURCESDIRECTORY\eng\common\helixpublish.proj /restore /p:TreatWarningsAsErrors=false /t:Test /bl:$env:BUILD_SOURCESDIRECTORY\artifacts\log\$env:BuildConfig\SendToHelix.binlog"'
+ displayName: ${{ parameters.DisplayNamePrefix }} (Windows)
+ env:
+ BuildConfig: $(_BuildConfig)
+ HelixSource: ${{ parameters.HelixSource }}
+ HelixType: ${{ parameters.HelixType }}
+ HelixBuild: ${{ parameters.HelixBuild }}
+ HelixConfiguration: ${{ parameters.HelixConfiguration }}
+ HelixTargetQueues: ${{ parameters.HelixTargetQueues }}
+ HelixAccessToken: ${{ parameters.HelixAccessToken }}
+ HelixPreCommands: ${{ parameters.HelixPreCommands }}
+ HelixPostCommands: ${{ parameters.HelixPostCommands }}
+ WorkItemDirectory: ${{ parameters.WorkItemDirectory }}
+ WorkItemCommand: ${{ parameters.WorkItemCommand }}
+ WorkItemTimeout: ${{ parameters.WorkItemTimeout }}
+ CorrelationPayloadDirectory: ${{ parameters.CorrelationPayloadDirectory }}
+ XUnitProjects: ${{ parameters.XUnitProjects }}
+ XUnitWorkItemTimeout: ${{ parameters.XUnitWorkItemTimeout }}
+ XUnitPublishTargetFramework: ${{ parameters.XUnitPublishTargetFramework }}
+ XUnitRuntimeTargetFramework: ${{ parameters.XUnitRuntimeTargetFramework }}
+ XUnitRunnerVersion: ${{ parameters.XUnitRunnerVersion }}
+ IncludeDotNetCli: ${{ parameters.IncludeDotNetCli }}
+ DotNetCliPackageType: ${{ parameters.DotNetCliPackageType }}
+ DotNetCliVersion: ${{ parameters.DotNetCliVersion }}
+ WaitForWorkItemCompletion: ${{ parameters.WaitForWorkItemCompletion }}
+ HelixBaseUri: ${{ parameters.HelixBaseUri }}
+ Creator: ${{ parameters.Creator }}
+ SYSTEM_ACCESSTOKEN: $(System.AccessToken)
+ condition: and(${{ parameters.condition }}, eq(variables['Agent.Os'], 'Windows_NT'))
+ continueOnError: ${{ parameters.continueOnError }}
+ - script: $BUILD_SOURCESDIRECTORY/eng/common/msbuild.sh $BUILD_SOURCESDIRECTORY/eng/common/helixpublish.proj /restore /p:TreatWarningsAsErrors=false /t:Test /bl:$BUILD_SOURCESDIRECTORY/artifacts/log/$BuildConfig/SendToHelix.binlog
+ displayName: ${{ parameters.DisplayNamePrefix }} (Unix)
+ env:
+ BuildConfig: $(_BuildConfig)
+ HelixSource: ${{ parameters.HelixSource }}
+ HelixType: ${{ parameters.HelixType }}
+ HelixBuild: ${{ parameters.HelixBuild }}
+ HelixConfiguration: ${{ parameters.HelixConfiguration }}
+ HelixTargetQueues: ${{ parameters.HelixTargetQueues }}
+ HelixAccessToken: ${{ parameters.HelixAccessToken }}
+ HelixPreCommands: ${{ parameters.HelixPreCommands }}
+ HelixPostCommands: ${{ parameters.HelixPostCommands }}
+ WorkItemDirectory: ${{ parameters.WorkItemDirectory }}
+ WorkItemCommand: ${{ parameters.WorkItemCommand }}
+ WorkItemTimeout: ${{ parameters.WorkItemTimeout }}
+ CorrelationPayloadDirectory: ${{ parameters.CorrelationPayloadDirectory }}
+ XUnitProjects: ${{ parameters.XUnitProjects }}
+ XUnitWorkItemTimeout: ${{ parameters.XUnitWorkItemTimeout }}
+ XUnitPublishTargetFramework: ${{ parameters.XUnitPublishTargetFramework }}
+ XUnitRuntimeTargetFramework: ${{ parameters.XUnitRuntimeTargetFramework }}
+ XUnitRunnerVersion: ${{ parameters.XUnitRunnerVersion }}
+ IncludeDotNetCli: ${{ parameters.IncludeDotNetCli }}
+ DotNetCliPackageType: ${{ parameters.DotNetCliPackageType }}
+ DotNetCliVersion: ${{ parameters.DotNetCliVersion }}
+ WaitForWorkItemCompletion: ${{ parameters.WaitForWorkItemCompletion }}
+ HelixBaseUri: ${{ parameters.HelixBaseUri }}
+ Creator: ${{ parameters.Creator }}
+ SYSTEM_ACCESSTOKEN: $(System.AccessToken)
+ condition: and(${{ parameters.condition }}, ne(variables['Agent.Os'], 'Windows_NT'))
+ continueOnError: ${{ parameters.continueOnError }}
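
[Editor's note: a condensed usage sketch; the queue names, creator, and work-item command below are assumptions, not values from this diff. HelixSource must start with pr/ and HelixType must end in '/', per the parameter docs above.]

    steps:
    - template: /eng/common/templates-official/steps/send-to-helix.yml
      parameters:
        HelixSource: pr/dotnet/runtime/main
        HelixType: test/functional/
        HelixTargetQueues: Ubuntu.2204.Amd64.Open;Windows.11.Amd64.Open
        Creator: dotnet-bot
        WorkItemDirectory: $(Build.SourcesDirectory)/artifacts/helix
        WorkItemCommand: echo run tests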
diff --git a/eng/common/templates-official/steps/source-build.yml b/eng/common/templates-official/steps/source-build.yml
new file mode 100644
index 000000000000..b1db70842f51
--- /dev/null
+++ b/eng/common/templates-official/steps/source-build.yml
@@ -0,0 +1,129 @@
+parameters:
+ # This template adds arcade-powered source-build to CI.
+
+ # This is a 'steps' template, and is intended for advanced scenarios where the existing build
+ # infra has a careful build methodology that must be followed. For example, a repo
+ # (dotnet/runtime) might choose to clone the GitHub repo only once and store it as a pipeline
+ # artifact for all subsequent jobs to use, to reduce dependence on a strong network connection to
+ # GitHub. Using this steps template leaves room for that infra to be included.
+
+ # Defines the platform on which to run the steps. See 'eng/common/templates-official/job/source-build.yml'
+ # for details. The entire object is described in the 'job' template for simplicity, even though
+ # the usage of the properties on this object is split between the 'job' and 'steps' templates.
+ platform: {}
+
+steps:
+# Build. Keep it self-contained for simple reusability. (No source-build-specific job variables.)
+- script: |
+ set -x
+ df -h
+
+ # If building on the internal project, the artifact feeds variable may be available (usually only if needed)
+ # In that case, call the feed setup script to add internal feeds corresponding to public ones.
+ # In addition, add an msbuild argument to copy the WIP from the repo to the target build location.
+ # This is because SetupNuGetSources.sh will alter the current NuGet.config file, and we need to preserve those
+ # changes.
+ internalRestoreArgs=
+ if [ '$(dn-bot-dnceng-artifact-feeds-rw)' != '$''(dn-bot-dnceng-artifact-feeds-rw)' ]; then
+ # Temporarily work around https://github.com/dotnet/arcade/issues/7709
+ chmod +x $(Build.SourcesDirectory)/eng/common/SetupNugetSources.sh
+ $(Build.SourcesDirectory)/eng/common/SetupNugetSources.sh $(Build.SourcesDirectory)/NuGet.config $(dn-bot-dnceng-artifact-feeds-rw)
+ internalRestoreArgs='/p:CopyWipIntoInnerSourceBuildRepo=true'
+
+ # The 'Copy WIP' feature of source build uses git stash to apply changes from the original repo.
+ # This only works if there is a username/email configured, which won't be the case in most CI runs.
+ git config --get user.email
+ if [ $? -ne 0 ]; then
+ git config user.email dn-bot@microsoft.com
+ git config user.name dn-bot
+ fi
+ fi
+
+ # If building on the internal project, the internal storage variable may be available (usually only if needed)
+ # In that case, add variables to allow the download of internal runtimes if the specified versions are not found
+ # in the default public locations.
+ internalRuntimeDownloadArgs=
+ if [ '$(dotnetbuilds-internal-container-read-token-base64)' != '$''(dotnetbuilds-internal-container-read-token-base64)' ]; then
+ internalRuntimeDownloadArgs='/p:DotNetRuntimeSourceFeed=https://dotnetbuilds.blob.core.windows.net/internal /p:DotNetRuntimeSourceFeedKey=$(dotnetbuilds-internal-container-read-token-base64) --runtimesourcefeed https://dotnetbuilds.blob.core.windows.net/internal --runtimesourcefeedkey $(dotnetbuilds-internal-container-read-token-base64)'
+ fi
+
+ buildConfig=Release
+ # Check if AzDO substitutes in a build config from a variable, and use it if so.
+ if [ '$(_BuildConfig)' != '$''(_BuildConfig)' ]; then
+ buildConfig='$(_BuildConfig)'
+ fi
+
+ officialBuildArgs=
+ if [ '${{ and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}' = 'True' ]; then
+ officialBuildArgs='/p:DotNetPublishUsingPipelines=true /p:OfficialBuildId=$(BUILD.BUILDNUMBER)'
+ fi
+
+ targetRidArgs=
+ if [ '${{ parameters.platform.targetRID }}' != '' ]; then
+ targetRidArgs='/p:TargetRid=${{ parameters.platform.targetRID }}'
+ fi
+
+ runtimeOsArgs=
+ if [ '${{ parameters.platform.runtimeOS }}' != '' ]; then
+ runtimeOsArgs='/p:RuntimeOS=${{ parameters.platform.runtimeOS }}'
+ fi
+
+ baseOsArgs=
+ if [ '${{ parameters.platform.baseOS }}' != '' ]; then
+ baseOsArgs='/p:BaseOS=${{ parameters.platform.baseOS }}'
+ fi
+
+ publishArgs=
+ if [ '${{ parameters.platform.skipPublishValidation }}' != 'true' ]; then
+ publishArgs='--publish'
+ fi
+
+ assetManifestFileName=SourceBuild_RidSpecific.xml
+ if [ '${{ parameters.platform.name }}' != '' ]; then
+ assetManifestFileName=SourceBuild_${{ parameters.platform.name }}.xml
+ fi
+
+ ${{ coalesce(parameters.platform.buildScript, './build.sh') }} --ci \
+ --configuration $buildConfig \
+ --restore --build --pack $publishArgs -bl \
+ $officialBuildArgs \
+ $internalRuntimeDownloadArgs \
+ $internalRestoreArgs \
+ $targetRidArgs \
+ $runtimeOsArgs \
+ $baseOsArgs \
+ /p:SourceBuildNonPortable=${{ parameters.platform.nonPortable }} \
+ /p:ArcadeBuildFromSource=true \
+ /p:AssetManifestFileName=$assetManifestFileName
+ displayName: Build
+
+# Upload build logs for diagnosis.
+- task: CopyFiles@2
+ displayName: Prepare BuildLogs staging directory
+ inputs:
+ SourceFolder: '$(Build.SourcesDirectory)'
+ Contents: |
+ **/*.log
+ **/*.binlog
+ artifacts/sb/prebuilt-report/**
+ TargetFolder: '$(Build.StagingDirectory)/BuildLogs'
+ CleanTargetFolder: true
+ continueOnError: true
+ condition: succeededOrFailed()
+
+- task: 1ES.PublishPipelineArtifact@1
+ displayName: Publish BuildLogs
+ inputs:
+ targetPath: '$(Build.StagingDirectory)/BuildLogs'
+ artifactName: BuildLogs_SourceBuild_${{ parameters.platform.name }}_Attempt$(System.JobAttempt)
+ continueOnError: true
+ condition: succeededOrFailed()
+
+# Manually inject component detection so that we can ignore the source build upstream cache, which contains
+# a nupkg cache of input packages (a local feed).
+# This path must match the upstream cache path in property 'CurrentRepoSourceBuiltNupkgCacheDir'
+# in src\Microsoft.DotNet.Arcade.Sdk\tools\SourceBuild\SourceBuildArcade.targets
+- task: ComponentGovernanceComponentDetection@0
+ displayName: Component Detection (Exclude upstream cache)
+ inputs:
+ ignoreDirectories: '$(Build.SourcesDirectory)/artifacts/sb/src/artifacts/obj/source-built-upstream-cache'
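
[Editor's note: one idiom in the build script above deserves a remark. Guards such as [ '$(dn-bot-dnceng-artifact-feeds-rw)' != '$''(dn-bot-dnceng-artifact-feeds-rw)' ] detect whether a pipeline variable was actually supplied: Azure DevOps macro-expands the left-hand literal before bash runs, while the quote-split right-hand side reassembles into the literal text $(dn-bot-dnceng-artifact-feeds-rw) only at shell time, so the two strings differ exactly when the secret is defined for the job.]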
diff --git a/eng/common/templates-official/variables/pool-providers.yml b/eng/common/templates-official/variables/pool-providers.yml
new file mode 100644
index 000000000000..beab7d1bfba0
--- /dev/null
+++ b/eng/common/templates-official/variables/pool-providers.yml
@@ -0,0 +1,45 @@
+# Select a pool provider based off branch name. Anything with branch name containing 'release' must go into an -Svc pool,
+# otherwise it should go into the "normal" pools. This separates out the queueing and billing of released branches.
+
+# Motivation:
+# Once a given branch of a repository's output has been officially "shipped" once, it is then considered to be COGS
+# (Cost of goods sold) and should be moved to a servicing pool provider. This allows both separation of queueing
+# (allowing release builds and main PR builds to not interfere with each other) and billing (required for COGS).
+# Additionally, the pool provider name itself may be subject to change when the .NET Core Engineering Services
+# team needs to move resources around and create new and potentially differently-named pools. Using this template
+# file from an Arcade-ified repo helps guard against both having to update one's release/* branches and pool renames.
+
+# How to use:
+# This yaml assumes your shipped product branches use the naming convention "release/..." (which many do).
+# If we find alternate naming conventions in broad usage it can be added to the condition below.
+#
+# First, import the template in an arcade-ified repo to pick up the variables, e.g.:
+#
+# variables:
+# - template: /eng/common/templates-official/variables/pool-providers.yml
+#
+# ... then anywhere specifying the pool provider use the runtime variables,
+# $(DncEngInternalBuildPool)
+#
+# pool:
+# name: $(DncEngInternalBuildPool)
+# image: 1es-windows-2022-pt
+
+variables:
+ # Coalesce the target and source branches so we know when a PR targets a release branch
+ # If these variables are somehow missing, fall back to main (tends to have more capacity)
+
+ # Any new -Svc alternative pools should have variables added here to allow for splitting work
+
+ - name: DncEngInternalBuildPool
+ value: $[
+ replace(
+ replace(
+ eq(contains(coalesce(variables['System.PullRequest.TargetBranch'], variables['Build.SourceBranch'], 'refs/heads/main'), 'release'), 'true'),
+ True,
+ 'NetCore1ESPool-Svc-Internal'
+ ),
+ False,
+ 'NetCore1ESPool-Internal'
+ )
+ ] \ No newline at end of file
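
[Editor's note: worked through by hand, for a hypothetical Build.SourceBranch of refs/heads/release/8.0: coalesce() yields that branch, contains(..., 'release') is true, eq(..., 'true') stringifies to True, and the inner replace() maps it to NetCore1ESPool-Svc-Internal. For refs/heads/main the equality stringifies to False, the inner replace() leaves it alone, and the outer replace() yields NetCore1ESPool-Internal.]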
diff --git a/eng/common/templates-official/variables/sdl-variables.yml b/eng/common/templates-official/variables/sdl-variables.yml
new file mode 100644
index 000000000000..dbdd66d4a4b3
--- /dev/null
+++ b/eng/common/templates-official/variables/sdl-variables.yml
@@ -0,0 +1,7 @@
+variables:
+# The Guardian version specified in 'eng/common/sdl/packages.config'. This value must be kept in
+# sync with the packages.config file.
+- name: DefaultGuardianVersion
+ value: 0.109.0
+- name: GuardianPackagesConfigFile
+ value: $(Build.SourcesDirectory)\eng\common\sdl\packages.config \ No newline at end of file
diff --git a/eng/common/tools.ps1 b/eng/common/tools.ps1
index 7d8dc89b919b..9bf873e3c258 100644
--- a/eng/common/tools.ps1
+++ b/eng/common/tools.ps1
@@ -65,6 +65,11 @@ $ErrorActionPreference = 'Stop'
# Base-64 encoded SAS token that has permission to storage container described by $runtimeSourceFeed
[string]$runtimeSourceFeedKey = if (Test-Path variable:runtimeSourceFeedKey) { $runtimeSourceFeedKey } else { $null }
+# True if the build is a product build
+[bool]$productBuild = if (Test-Path variable:productBuild) { $productBuild } else { $false }
+
+[String[]]$properties = if (Test-Path variable:properties) { $properties } else { @() }
+
function Create-Directory ([string[]] $path) {
New-Item -Path $path -Force -ItemType 'Directory' | Out-Null
}
@@ -850,7 +855,8 @@ function MSBuild-Core() {
}
# When running on Azure Pipelines, override the returned exit code to avoid double logging.
- if ($ci -and $env:SYSTEM_TEAMPROJECT -ne $null) {
+ # Skip this when the build is a child of the VMR orchestrator build.
+ if ($ci -and $env:SYSTEM_TEAMPROJECT -ne $null -and !$productBuild -and $properties -notlike "*DotNetBuildRepo=true*") {
Write-PipelineSetResult -Result "Failed" -Message "msbuild execution failed."
# Exiting with an exit code causes the azure pipelines task to log yet another "noise" error
# The above Write-PipelineSetResult will cause the task to be marked as failure without adding yet another error
diff --git a/eng/common/tools.sh b/eng/common/tools.sh
index ece4b7307953..db64e298ff63 100755
--- a/eng/common/tools.sh
+++ b/eng/common/tools.sh
@@ -68,6 +68,9 @@ fi
runtime_source_feed=${runtime_source_feed:-''}
runtime_source_feed_key=${runtime_source_feed_key:-''}
+# True if the build is a product build
+product_build=${product_build:-false}
+
# Resolve any symlinks in the given path.
function ResolvePath {
local path=$1
@@ -141,7 +144,7 @@ function InitializeDotNetCli {
if [[ $global_json_has_runtimes == false && -n "${DOTNET_INSTALL_DIR:-}" && -d "$DOTNET_INSTALL_DIR/sdk/$dotnet_sdk_version" ]]; then
dotnet_root="$DOTNET_INSTALL_DIR"
else
- dotnet_root="$repo_root/.dotnet"
+ dotnet_root="${repo_root}.dotnet"
export DOTNET_INSTALL_DIR="$dotnet_root"
@@ -503,7 +506,8 @@ function MSBuild-Core {
echo "Build failed with exit code $exit_code. Check errors above."
# When running on Azure Pipelines, override the returned exit code to avoid double logging.
- if [[ "$ci" == "true" && -n ${SYSTEM_TEAMPROJECT:-} ]]; then
+ # Skip this when the build is a child of the VMR orchestrator build.
+ if [[ "$ci" == true && -n ${SYSTEM_TEAMPROJECT:-} && "$product_build" != true && "$properties" != *"DotNetBuildRepo=true"* ]]; then
Write-PipelineSetResult -result "Failed" -message "msbuild execution failed."
# Exiting with an exit code causes the azure pipelines task to log yet another "noise" error
# The above Write-PipelineSetResult will cause the task to be marked as failure without adding yet another error
diff --git a/eng/pipelines/common/templates/runtimes/run-test-job.yml b/eng/pipelines/common/templates/runtimes/run-test-job.yml
index 07d580cb96a9..2d6bae58e08a 100644
--- a/eng/pipelines/common/templates/runtimes/run-test-job.yml
+++ b/eng/pipelines/common/templates/runtimes/run-test-job.yml
@@ -565,7 +565,7 @@ jobs:
- jitobjectstackallocation
- jitphysicalpromotion_only
- jitphysicalpromotion_full
- - jitcrossblocklocalassertionprop
+ - jitrlcse
${{ if in(parameters.testGroup, 'jit-cfg') }}:
scenarios:
- jitcfg
diff --git a/eng/pipelines/common/xplat-setup.yml b/eng/pipelines/common/xplat-setup.yml
index c66fec22bead..dfb8952b423e 100644
--- a/eng/pipelines/common/xplat-setup.yml
+++ b/eng/pipelines/common/xplat-setup.yml
@@ -171,9 +171,13 @@ jobs:
demands: ImageOverride -equals Build.Ubuntu.2204.Amd64
# OSX Build Pool (we don't have on-prem OSX BuildPool).
- ${{ if in(parameters.osGroup, 'osx', 'maccatalyst', 'ios', 'iossimulator', 'tvos', 'tvossimulator') }}:
+ ${{ if and(in(parameters.osGroup, 'osx', 'maccatalyst', 'ios', 'iossimulator', 'tvos', 'tvossimulator'), eq(variables['System.TeamProject'], 'public')) }}:
vmImage: 'macos-12'
+ # Official build OSX pool
+ ${{ if and(in(parameters.osGroup, 'osx', 'maccatalyst', 'ios', 'iossimulator', 'tvos', 'tvossimulator'), ne(variables['System.TeamProject'], 'public')) }}:
+ vmImage: 'macos-13-arm64'
+
# Official Build Windows Pool
${{ if and(or(eq(parameters.osGroup, 'windows'), eq(parameters.jobParameters.hostedOs, 'windows')), ne(variables['System.TeamProject'], 'public')) }}:
name: $(DncEngInternalBuildPool)
diff --git a/eng/pipelines/coreclr/libraries-pgo.yml b/eng/pipelines/coreclr/libraries-pgo.yml
index a7766f2fb881..2ac83670dd51 100644
--- a/eng/pipelines/coreclr/libraries-pgo.yml
+++ b/eng/pipelines/coreclr/libraries-pgo.yml
@@ -70,4 +70,4 @@ extends:
- jitosr_stress_random
- syntheticpgo
- syntheticpgo_blend
- - jitcrossblocklocalassertionprop
+ - jitrlcse
diff --git a/eng/pipelines/coreclr/perf-non-wasm-jobs.yml b/eng/pipelines/coreclr/perf-non-wasm-jobs.yml
index 7de5a003edff..3a3d14b70a05 100644
--- a/eng/pipelines/coreclr/perf-non-wasm-jobs.yml
+++ b/eng/pipelines/coreclr/perf-non-wasm-jobs.yml
@@ -307,6 +307,24 @@ jobs:
logicalmachine: 'perfowl'
experimentName: 'gdv3'
+ # run coreclr perfowl microbenchmarks perf rlcse jobs
+ - template: /eng/pipelines/common/platform-matrix.yml
+ parameters:
+ jobTemplate: /eng/pipelines/coreclr/templates/perf-job.yml
+ buildConfig: release
+ runtimeFlavor: coreclr
+ platforms:
+ - linux_x64
+ - windows_x64
+ jobParameters:
+ testGroup: perf
+ liveLibrariesBuildConfig: Release
+ projectFile: microbenchmarks.proj
+ runKind: micro
+ runJobTemplate: /eng/pipelines/coreclr/templates/run-performance-job.yml
+ logicalmachine: 'perfowl'
+ experimentName: 'rlcse'
+
# run coreclr crossgen perf job
- template: /eng/pipelines/common/platform-matrix.yml
parameters:
diff --git a/eng/pipelines/coreclr/runtime-nativeaot-outerloop.yml b/eng/pipelines/coreclr/runtime-nativeaot-outerloop.yml
index 164485e7d007..de23519f9c62 100644
--- a/eng/pipelines/coreclr/runtime-nativeaot-outerloop.yml
+++ b/eng/pipelines/coreclr/runtime-nativeaot-outerloop.yml
@@ -60,14 +60,17 @@ extends:
- osx_x64
- osx_arm64
- linux_x64
+ - linux_arm
- linux_arm64
- linux_musl_x64
+ - linux_musl_arm64
jobParameters:
testGroup: innerloop
isSingleFile: true
nameSuffix: NativeAOT_Libs
- buildArgs: -s clr.aot+host.native+libs+libs.tests -c $(_BuildConfig) /p:TestNativeAot=true /p:ArchiveTests=true /p:IlcUseServerGc=false
+ buildArgs: -s clr.aot+host.native+libs+libs.tests -c $(_BuildConfig) /p:TestNativeAot=true /p:ArchiveTests=true /p:IlcUseServerGc=false /p:RunAnalyzers=false
timeoutInMinutes: 300 # doesn't normally take this long, but I've seen Helix queues backed up for 160 minutes
+ includeAllPlatforms: true
# extra steps, run tests
postBuildSteps:
- template: /eng/pipelines/libraries/helix.yml
@@ -91,7 +94,7 @@ extends:
testGroup: innerloop
isSingleFile: true
nameSuffix: NativeAOT_Checked_Libs
- buildArgs: -s clr.aot+host.native+libs+libs.tests -c $(_BuildConfig) -rc Checked /p:TestNativeAot=true /p:ArchiveTests=true /p:IlcUseServerGc=false
+ buildArgs: -s clr.aot+host.native+libs+libs.tests -c $(_BuildConfig) -rc Checked /p:TestNativeAot=true /p:ArchiveTests=true /p:IlcUseServerGc=false /p:RunAnalyzers=false
timeoutInMinutes: 360
# extra steps, run tests
postBuildSteps:
@@ -116,7 +119,7 @@ extends:
testGroup: innerloop
isSingleFile: true
nameSuffix: NativeAOT_Checked_Libs_SizeOpt
- buildArgs: -s clr.aot+host.native+libs+libs.tests -c $(_BuildConfig) -rc Checked /p:TestNativeAot=true /p:ArchiveTests=true /p:OptimizationPreference=Size /p:IlcUseServerGc=false
+ buildArgs: -s clr.aot+host.native+libs+libs.tests -c $(_BuildConfig) -rc Checked /p:TestNativeAot=true /p:ArchiveTests=true /p:OptimizationPreference=Size /p:IlcUseServerGc=false /p:RunAnalyzers=false
timeoutInMinutes: 240
# extra steps, run tests
postBuildSteps:
@@ -141,7 +144,7 @@ extends:
testGroup: innerloop
isSingleFile: true
nameSuffix: NativeAOT_Checked_Libs_SpeedOpt
- buildArgs: -s clr.aot+host.native+libs+libs.tests -c $(_BuildConfig) -rc Checked /p:TestNativeAot=true /p:ArchiveTests=true /p:OptimizationPreference=Speed /p:IlcUseServerGc=false
+ buildArgs: -s clr.aot+host.native+libs+libs.tests -c $(_BuildConfig) -rc Checked /p:TestNativeAot=true /p:ArchiveTests=true /p:OptimizationPreference=Speed /p:IlcUseServerGc=false /p:RunAnalyzers=false
timeoutInMinutes: 240
# extra steps, run tests
postBuildSteps:
@@ -162,6 +165,7 @@ extends:
platforms:
- windows_x64
- linux_x64
+ - linux_arm
variables:
- name: timeoutPerTestInMinutes
value: 60
@@ -170,7 +174,7 @@ extends:
jobParameters:
timeoutInMinutes: 240
nameSuffix: NativeAOT_Pri0
- buildArgs: -s clr.aot+host.native+libs -rc $(_BuildConfig) -lc Release -hc Release
+ buildArgs: -s clr.aot+host.native+libs -rc $(_BuildConfig) -lc Release -hc Release /p:RunAnalyzers=false
postBuildSteps:
- template: /eng/pipelines/coreclr/nativeaot-post-build-steps.yml
parameters:
diff --git a/eng/pipelines/coreclr/templates/helix-queues-setup.yml b/eng/pipelines/coreclr/templates/helix-queues-setup.yml
index 7b4ce6c6c7f4..933919411144 100644
--- a/eng/pipelines/coreclr/templates/helix-queues-setup.yml
+++ b/eng/pipelines/coreclr/templates/helix-queues-setup.yml
@@ -86,9 +86,9 @@ jobs:
# Linux musl arm32
- ${{ if eq(parameters.platform, 'linux_musl_arm') }}:
- ${{ if eq(variables['System.TeamProject'], 'public') }}:
- - (Alpine.316.Arm32.Open)Ubuntu.2004.ArmArch.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:alpine-3.16-helix-arm32v7
+ - (Alpine.316.Arm32.Open)Ubuntu.2004.ArmArch.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:alpine-3.17-helix-arm32v7
- ${{ if eq(variables['System.TeamProject'], 'internal') }}:
- - (Alpine.316.Arm32)Ubuntu.2004.ArmArch@mcr.microsoft.com/dotnet-buildtools/prereqs:alpine-3.16-helix-arm32v7
+ - (Alpine.316.Arm32)Ubuntu.2004.ArmArch@mcr.microsoft.com/dotnet-buildtools/prereqs:alpine-3.17-helix-arm32v7
# Linux musl arm64
- ${{ if eq(parameters.platform, 'linux_musl_arm64') }}:
diff --git a/eng/pipelines/runtime.yml b/eng/pipelines/runtime.yml
index 615a0318030f..ab1477f56dec 100644
--- a/eng/pipelines/runtime.yml
+++ b/eng/pipelines/runtime.yml
@@ -544,7 +544,7 @@ extends:
jobParameters:
timeoutInMinutes: 120
nameSuffix: NativeAOT
- buildArgs: -s clr.aot+host.native+libs -rc $(_BuildConfig) -lc Release -hc Release
+ buildArgs: -s clr.aot+host.native+libs -rc $(_BuildConfig) -lc Release -hc Release /p:RunAnalyzers=false
postBuildSteps:
- template: /eng/pipelines/coreclr/nativeaot-post-build-steps.yml
parameters:
@@ -583,7 +583,7 @@ extends:
jobParameters:
timeoutInMinutes: 180
nameSuffix: NativeAOT
- buildArgs: -s clr.aot+host.native+libs.native+libs.sfx -rc $(_BuildConfig) -lc Release -hc Release
+ buildArgs: -s clr.aot+host.native+libs.native+libs.sfx -rc $(_BuildConfig) -lc Release -hc Release /p:RunAnalyzers=false
postBuildSteps:
- template: /eng/pipelines/coreclr/nativeaot-post-build-steps.yml
parameters:
@@ -628,7 +628,7 @@ extends:
testGroup: innerloop
timeoutInMinutes: 120
nameSuffix: NativeAOT
- buildArgs: -s clr.aot+host.native+libs+tools.illink -c $(_BuildConfig) -rc $(_BuildConfig) -lc Release -hc Release
+ buildArgs: -s clr.aot+host.native+libs+tools.illink -c $(_BuildConfig) -rc $(_BuildConfig) -lc Release -hc Release /p:RunAnalyzers=false
postBuildSteps:
- template: /eng/pipelines/coreclr/nativeaot-post-build-steps.yml
parameters:
@@ -665,7 +665,7 @@ extends:
testGroup: innerloop
isSingleFile: true
nameSuffix: NativeAOT_Libraries
- buildArgs: -s clr.aot+host.native+libs+libs.tests -c $(_BuildConfig) /p:TestNativeAot=true /p:RunSmokeTestsOnly=true /p:ArchiveTests=true
+ buildArgs: -s clr.aot+host.native+libs+libs.tests -c $(_BuildConfig) /p:TestNativeAot=true /p:RunSmokeTestsOnly=true /p:ArchiveTests=true /p:RunAnalyzers=false
timeoutInMinutes: 240 # Doesn't actually take long, but we've seen the ARM64 Helix queue often get backlogged for 2+ hours
# extra steps, run tests
postBuildSteps:
diff --git a/eng/testing/ChromeVersions.props b/eng/testing/ChromeVersions.props
index 8549c7646a51..ecf01665114d 100644
--- a/eng/testing/ChromeVersions.props
+++ b/eng/testing/ChromeVersions.props
@@ -1,12 +1,12 @@
<Project>
<PropertyGroup>
- <linux_ChromeVersion>121.0.6167.184</linux_ChromeVersion>
- <linux_ChromeRevision>1233107</linux_ChromeRevision>
- <linux_ChromeBaseSnapshotUrl>https://storage.googleapis.com/chromium-browser-snapshots/Linux_x64/1233114</linux_ChromeBaseSnapshotUrl>
- <linux_V8Version>12.1.285</linux_V8Version>
- <win_ChromeVersion>121.0.6167.185</win_ChromeVersion>
- <win_ChromeRevision>1233107</win_ChromeRevision>
- <win_ChromeBaseSnapshotUrl>https://storage.googleapis.com/chromium-browser-snapshots/Win_x64/1233136</win_ChromeBaseSnapshotUrl>
- <win_V8Version>12.1.285</win_V8Version>
+ <linux_ChromeVersion>122.0.6261.69</linux_ChromeVersion>
+ <linux_ChromeRevision>1250580</linux_ChromeRevision>
+ <linux_ChromeBaseSnapshotUrl>https://storage.googleapis.com/chromium-browser-snapshots/Linux_x64/1250580</linux_ChromeBaseSnapshotUrl>
+ <linux_V8Version>12.2.281</linux_V8Version>
+ <win_ChromeVersion>122.0.6261.69</win_ChromeVersion>
+ <win_ChromeRevision>1250580</win_ChromeRevision>
+ <win_ChromeBaseSnapshotUrl>https://storage.googleapis.com/chromium-browser-snapshots/Win_x64/1250586</win_ChromeBaseSnapshotUrl>
+ <win_V8Version>12.2.281</win_V8Version>
</PropertyGroup>
</Project> \ No newline at end of file
diff --git a/eng/testing/scenarios/BuildWasmAppsJobsList.txt b/eng/testing/scenarios/BuildWasmAppsJobsList.txt
index 5519c03e6ef7..5ccb34b25e18 100644
--- a/eng/testing/scenarios/BuildWasmAppsJobsList.txt
+++ b/eng/testing/scenarios/BuildWasmAppsJobsList.txt
@@ -36,12 +36,13 @@ Wasm.Build.Tests.TestAppScenarios.AppSettingsTests
Wasm.Build.Tests.TestAppScenarios.LazyLoadingTests
Wasm.Build.Tests.TestAppScenarios.LibraryInitializerTests
Wasm.Build.Tests.TestAppScenarios.SatelliteLoadingTests
+Wasm.Build.Tests.TestAppScenarios.DownloadResourceProgressTests
+Wasm.Build.Tests.TestAppScenarios.SignalRClientTests
Wasm.Build.Tests.WasmBuildAppTest
Wasm.Build.Tests.WasmNativeDefaultsTests
Wasm.Build.Tests.WasmRunOutOfAppBundleTests
Wasm.Build.Tests.WasmSIMDTests
Wasm.Build.Tests.WasmTemplateTests
Wasm.Build.Tests.WorkloadTests
-Wasm.Build.Tests.TestAppScenarios.DownloadResourceProgressTests
Wasm.Build.Tests.MT.Blazor.SimpleMultiThreadedTests
Wasm.Build.Tests.TestAppScenarios.DebugLevelTests
diff --git a/global.json b/global.json
index 1e159da9c546..0d5c06c2e3ff 100644
--- a/global.json
+++ b/global.json
@@ -1,18 +1,18 @@
{
"sdk": {
- "version": "9.0.100-alpha.1.23615.4",
+ "version": "9.0.100-preview.1.24101.2",
"allowPrerelease": true,
"rollForward": "major"
},
"tools": {
- "dotnet": "9.0.100-alpha.1.23615.4"
+ "dotnet": "9.0.100-preview.1.24101.2"
},
"msbuild-sdks": {
- "Microsoft.DotNet.Arcade.Sdk": "9.0.0-beta.24112.1",
- "Microsoft.DotNet.Helix.Sdk": "9.0.0-beta.24112.1",
- "Microsoft.DotNet.SharedFramework.Sdk": "9.0.0-beta.24112.1",
+ "Microsoft.DotNet.Arcade.Sdk": "9.0.0-beta.24151.1",
+ "Microsoft.DotNet.Helix.Sdk": "9.0.0-beta.24151.1",
+ "Microsoft.DotNet.SharedFramework.Sdk": "9.0.0-beta.24151.1",
"Microsoft.Build.NoTargets": "3.7.0",
"Microsoft.Build.Traversal": "3.4.0",
- "Microsoft.NET.Sdk.IL": "9.0.0-preview.2.24116.2"
+ "Microsoft.NET.Sdk.IL": "9.0.0-preview.3.24126.1"
}
}
diff --git a/src/coreclr/System.Private.CoreLib/System.Private.CoreLib.csproj b/src/coreclr/System.Private.CoreLib/System.Private.CoreLib.csproj
index 6b3ddff0cc86..d005341dae6b 100644
--- a/src/coreclr/System.Private.CoreLib/System.Private.CoreLib.csproj
+++ b/src/coreclr/System.Private.CoreLib/System.Private.CoreLib.csproj
@@ -203,7 +203,6 @@
<Compile Include="$(BclSourcesRoot)\System\Reflection\RuntimePropertyInfo.cs" />
<Compile Include="$(BclSourcesRoot)\System\Reflection\TypeNameParser.CoreCLR.cs" />
<Compile Include="$(BclSourcesRoot)\System\Reflection\Metadata\RuntimeTypeMetadataUpdateHandler.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Resources\ManifestBasedResourceGroveler.CoreCLR.cs" />
<Compile Include="$(BclSourcesRoot)\System\Runtime\CompilerServices\CastHelpers.cs" />
<Compile Include="$(BclSourcesRoot)\System\Runtime\CompilerServices\ICastableHelpers.cs" />
<Compile Include="$(BclSourcesRoot)\System\Runtime\CompilerServices\RuntimeHelpers.CoreCLR.cs" />
@@ -231,7 +230,6 @@
<Compile Include="$(BclSourcesRoot)\System\RuntimeHandles.cs" />
<Compile Include="$(BclSourcesRoot)\System\RuntimeType.ActivatorCache.cs" />
<Compile Include="$(BclSourcesRoot)\System\RuntimeType.CoreCLR.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Security\DynamicSecurityMethodAttribute.cs" />
<Compile Include="$(BclSourcesRoot)\System\StartupHookProvider.CoreCLR.cs" />
<Compile Include="$(BclSourcesRoot)\System\String.CoreCLR.cs" />
<Compile Include="$(BclSourcesRoot)\System\StubHelpers.cs" />
diff --git a/src/coreclr/System.Private.CoreLib/src/Internal/Runtime/InteropServices/ComActivator.cs b/src/coreclr/System.Private.CoreLib/src/Internal/Runtime/InteropServices/ComActivator.cs
index db8d4ead4659..77f64abd1b42 100644
--- a/src/coreclr/System.Private.CoreLib/src/Internal/Runtime/InteropServices/ComActivator.cs
+++ b/src/coreclr/System.Private.CoreLib/src/Internal/Runtime/InteropServices/ComActivator.cs
@@ -203,7 +203,6 @@ namespace Internal.Runtime.InteropServices
// Finally validate signature
ReadOnlySpan<ParameterInfo> methParams = method.GetParametersAsSpan();
if (method.ReturnType != typeof(void)
- || methParams == null
|| methParams.Length != 1
|| (methParams[0].ParameterType != typeof(string) && methParams[0].ParameterType != typeof(Type)))
{
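
The dropped null test is sound: GetParametersAsSpan returns a ReadOnlySpan<ParameterInfo>, a struct that can never be a null reference, so the old comparison could only ever match an empty span, a case the Length check already rejects. A minimal standalone sketch of why (illustrative only, not runtime code):

    using System;

    // ReadOnlySpan<T> is a struct: comparing it to null goes through an implicit
    // conversion to an empty span, so it never tests for a null reference.
    ReadOnlySpan<int> methParams = default;
    Console.WriteLine(methParams == null);   // True: null converts to an empty span
    Console.WriteLine(methParams.Length);    // 0, so a Length check subsumes the test
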
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Array.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Array.CoreCLR.cs
index 16d9067567ee..de7b3021c458 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Array.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Array.CoreCLR.cs
@@ -74,7 +74,7 @@ namespace System
if (pMT->ContainsGCPointers)
Buffer.BulkMoveWithWriteBarrier(ref dst, ref src, byteCount);
else
- Buffer.Memmove(ref dst, ref src, byteCount);
+ SpanHelpers.Memmove(ref dst, ref src, byteCount);
// GC.KeepAlive(sourceArray) not required. pMT kept alive via sourceArray
return;
@@ -184,7 +184,7 @@ namespace System
}
else
{
- Buffer.Memmove(ref dest, ref obj.GetRawData(), destSize);
+ SpanHelpers.Memmove(ref dest, ref obj.GetRawData(), destSize);
}
}
}
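
This merge consistently rewrites Buffer.Memmove calls to SpanHelpers.Memmove in CoreCLR-specific files (here and in Marshal, String, StubHelpers, and Object below), which suggests the shared SpanHelpers copy is now the canonical implementation. The observable contract appears unchanged: an overlap-safe byte copy. A standalone sketch of those semantics using public API (Span<T>.CopyTo carries the same overlap guarantee):

    using System;

    // Memmove-style copy: overlapping source and destination are handled correctly.
    byte[] buffer = { 1, 2, 3, 4, 5 };
    buffer.AsSpan(0, 4).CopyTo(buffer.AsSpan(1));   // overlapping regions are fine
    Console.WriteLine(string.Join(",", buffer));    // prints 1,1,2,3,4
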
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Object.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Object.CoreCLR.cs
index 70cff629fc28..940d1622bad1 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Object.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Object.CoreCLR.cs
@@ -1,6 +1,7 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
+using System.Diagnostics;
using System.Runtime.CompilerServices;
namespace System
@@ -19,7 +20,9 @@ namespace System
[Intrinsic]
protected internal unsafe object MemberwiseClone()
{
- object clone = RuntimeHelpers.AllocateUninitializedClone(this);
+ object clone = this;
+ RuntimeHelpers.AllocateUninitializedClone(ObjectHandleOnStack.Create(ref clone));
+ Debug.Assert(clone != this);
// copy contents of "this" to the clone
@@ -30,7 +33,7 @@ namespace System
if (RuntimeHelpers.GetMethodTable(clone)->ContainsGCPointers)
Buffer.BulkMoveWithWriteBarrier(ref dst, ref src, byteCount);
else
- Buffer.Memmove(ref dst, ref src, byteCount);
+ SpanHelpers.Memmove(ref dst, ref src, byteCount);
return clone;
}
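
AllocateUninitializedClone changes from an FCall returning a raw object to a QCall taking an ObjectHandleOnStack (see the objectnative.cpp hunk below). Passing a stack-rooted handle instead of returning a reference keeps the object reported to the GC across the native transition; the managed caller seeds the handle with "this" and asserts that the callee replaced it. A managed-only sketch of that call shape (names hypothetical; no actual QCall involved):

    using System;

    // The caller seeds a local with a non-null reference and passes it by ref; the
    // callee overwrites it. Rooting the reference in the caller's frame, rather
    // than returning a raw object, is the point of the handle-on-stack pattern.
    object clone = new object();                          // seed, like "clone = this"
    object original = clone;
    AllocateUninitializedClone(ref clone);
    Console.WriteLine(!ReferenceEquals(clone, original)); // True, as Debug.Assert checks

    // Managed stand-in for the native QCall (hypothetical, allocation only).
    static void AllocateUninitializedClone(ref object handle) => handle = new object();
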
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/DynamicILGenerator.cs b/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/DynamicILGenerator.cs
index 2b695f1baf5b..327113c63f9a 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/DynamicILGenerator.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/DynamicILGenerator.cs
@@ -417,7 +417,7 @@ namespace System.Reflection.Emit
throw new ArgumentException(SR.Argument_MustBeRuntimeMethodInfo, nameof(methodInfo));
ReadOnlySpan<ParameterInfo> paramInfo = methodInfo.GetParametersAsSpan();
- if (paramInfo != null && paramInfo.Length != 0)
+ if (paramInfo.Length != 0)
{
parameterTypes = new Type[paramInfo.Length];
requiredCustomModifiers = new Type[parameterTypes.Length][];
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimeAssembly.cs b/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimeAssembly.cs
index b5cff2f1e42e..53f2690948df 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimeAssembly.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimeAssembly.cs
@@ -645,27 +645,29 @@ namespace System.Reflection
{
ArgumentNullException.ThrowIfNull(culture);
- return InternalGetSatelliteAssembly(culture, version, throwOnFileNotFound: true)!;
+ return InternalGetSatelliteAssembly(this, culture, version, throwOnFileNotFound: true)!;
}
[DynamicSecurityMethod] // Methods containing StackCrawlMark local var has to be marked DynamicSecurityMethod
- internal Assembly? InternalGetSatelliteAssembly(CultureInfo culture,
+ internal static Assembly? InternalGetSatelliteAssembly(Assembly assembly,
+ CultureInfo culture,
Version? version,
bool throwOnFileNotFound)
{
var an = new AssemblyName();
- an.SetPublicKey(GetPublicKey());
- an.Flags = GetFlags() | AssemblyNameFlags.PublicKey;
- an.Version = version ?? GetVersion();
+ RuntimeAssembly runtimeAssembly = (RuntimeAssembly)assembly;
+ an.SetPublicKey(runtimeAssembly.GetPublicKey());
+ an.Flags = runtimeAssembly.GetFlags() | AssemblyNameFlags.PublicKey;
+ an.Version = version ?? runtimeAssembly.GetVersion();
an.CultureInfo = culture;
- an.Name = GetSimpleName() + ".resources";
+ an.Name = runtimeAssembly.GetSimpleName() + ".resources";
// This stack crawl mark is never used because the requesting assembly is explicitly specified,
// so the value could be anything.
StackCrawlMark unused = default;
- RuntimeAssembly? retAssembly = InternalLoad(an, ref unused, requestingAssembly: this, throwOnFileNotFound: throwOnFileNotFound);
+ RuntimeAssembly? retAssembly = InternalLoad(an, ref unused, requestingAssembly: runtimeAssembly, throwOnFileNotFound: throwOnFileNotFound);
- if (retAssembly == this)
+ if (retAssembly == runtimeAssembly)
{
retAssembly = null;
}
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Resources/ManifestBasedResourceGroveler.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Resources/ManifestBasedResourceGroveler.CoreCLR.cs
deleted file mode 100644
index 05805072cd7c..000000000000
--- a/src/coreclr/System.Private.CoreLib/src/System/Resources/ManifestBasedResourceGroveler.CoreCLR.cs
+++ /dev/null
@@ -1,19 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System.Globalization;
-using System.Reflection;
-
-namespace System.Resources
-{
- internal sealed partial class ManifestBasedResourceGroveler
- {
- // Internal version of GetSatelliteAssembly that avoids throwing FileNotFoundException
- private static Assembly? InternalGetSatelliteAssembly(Assembly mainAssembly,
- CultureInfo culture,
- Version? version)
- {
- return ((RuntimeAssembly)mainAssembly).InternalGetSatelliteAssembly(culture, version, throwOnFileNotFound: false);
- }
- }
-}
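
The RuntimeAssembly change above makes InternalGetSatelliteAssembly static, with the assembly as an explicit first argument, and that is what lets this CoreCLR-only shim be deleted (its Compile entry was also dropped from the csproj earlier in the diff): the partial class existed solely to downcast to RuntimeAssembly and forward. A hedged sketch of the resulting call shape, using the public GetSatelliteAssembly as a stand-in for the internal entry point:

    using System.Globalization;
    using System.IO;
    using System.Reflection;

    internal static class SatelliteLoader
    {
        // Hypothetical mirror of the refactor: the helper is static and takes the
        // assembly explicitly, so shared code needs no RuntimeAssembly downcast.
        internal static Assembly TryGetSatellite(Assembly assembly, CultureInfo culture)
        {
            try { return assembly.GetSatelliteAssembly(culture); }
            catch (FileNotFoundException) { return null; }   // throwOnFileNotFound: false
        }
    }
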
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.CoreCLR.cs
index 02ecf9656892..733e3a664bcc 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.CoreCLR.cs
@@ -139,8 +139,32 @@ namespace System.Runtime.CompilerServices
[MethodImpl(MethodImplOptions.InternalCall)]
internal static extern int TryGetHashCode(object o);
+ public static new unsafe bool Equals(object? o1, object? o2)
+ {
+ // Compare by ref for normal classes, by value for value types.
+
+ if (ReferenceEquals(o1, o2))
+ return true;
+
+ if (o1 is null || o2 is null)
+ return false;
+
+ MethodTable* pMT = GetMethodTable(o1);
+
+ // If it's not a value class, don't compare by value
+ if (!pMT->IsValueType)
+ return false;
+
+ // Make sure they are the same type.
+ if (pMT != GetMethodTable(o2))
+ return false;
+
+ // Compare the contents
+ return ContentEquals(o1, o2);
+ }
+
[MethodImpl(MethodImplOptions.InternalCall)]
- public static extern new bool Equals(object? o1, object? o2);
+ private static extern unsafe bool ContentEquals(object o1, object o2);
[Obsolete("OffsetToStringData has been deprecated. Use string.GetPinnableReference() instead.")]
public static int OffsetToStringData
@@ -194,8 +218,8 @@ namespace System.Runtime.CompilerServices
return rt.GetUninitializedObject();
}
- [MethodImpl(MethodImplOptions.InternalCall)]
- internal static extern object AllocateUninitializedClone(object obj);
+ [LibraryImport(QCall, EntryPoint = "ObjectNative_AllocateUninitializedClone")]
+ internal static partial void AllocateUninitializedClone(ObjectHandleOnStack objHandle);
/// <returns>true if given type is reference type or value type that contains references</returns>
[Intrinsic]
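
RuntimeHelpers.Equals is split here: the reference-identity, null, and MethodTable checks now run as ordinary managed code, and only the final bitwise comparison remains native (ContentEquals, implemented in objectnative.cpp below). The public semantics are unchanged, as this standalone example shows:

    using System;
    using System.Runtime.CompilerServices;

    object a = new Point { X = 1, Y = 2 };
    object b = new Point { X = 1, Y = 2 };
    // Value types compare by content, reference types by identity.
    Console.WriteLine(RuntimeHelpers.Equals(a, b));                        // True
    Console.WriteLine(RuntimeHelpers.Equals(new object(), new object()));  // False

    struct Point { public int X, Y; }
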
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Runtime/ExceptionServices/InternalCalls.cs b/src/coreclr/System.Private.CoreLib/src/System/Runtime/ExceptionServices/InternalCalls.cs
index 4ae608fc17d2..228f58c0ea4d 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Runtime/ExceptionServices/InternalCalls.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Runtime/ExceptionServices/InternalCalls.cs
@@ -42,7 +42,7 @@ namespace System.Runtime.ExceptionServices
[LibraryImport(RuntimeHelpers.QCall, EntryPoint = "EHEnumInitFromStackFrameIterator")]
[return: MarshalAs(UnmanagedType.Bool)]
- internal static unsafe partial bool RhpEHEnumInitFromStackFrameIterator(ref StackFrameIterator pFrameIter, byte** pMethodStartAddress, void* pEHEnum);
+ internal static unsafe partial bool RhpEHEnumInitFromStackFrameIterator(ref StackFrameIterator pFrameIter, out EH.MethodRegionInfo pMethodRegionInfo, void* pEHEnum);
[LibraryImport(RuntimeHelpers.QCall, EntryPoint = "EHEnumNext")]
[return: MarshalAs(UnmanagedType.Bool)]
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshal.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshal.CoreCLR.cs
index c04665aa6c22..bbdccc6cd2ee 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshal.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshal.CoreCLR.cs
@@ -266,7 +266,7 @@ namespace System.Runtime.InteropServices
}
else
{
- Buffer.Memmove(ref *(byte*)ptr, ref structure.GetRawData(), size);
+ SpanHelpers.Memmove(ref *(byte*)ptr, ref structure.GetRawData(), size);
}
}
@@ -291,7 +291,7 @@ namespace System.Runtime.InteropServices
}
else
{
- Buffer.Memmove(ref structure.GetRawData(), ref *(byte*)ptr, size);
+ SpanHelpers.Memmove(ref structure.GetRawData(), ref *(byte*)ptr, size);
}
}
diff --git a/src/coreclr/System.Private.CoreLib/src/System/RuntimeType.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/RuntimeType.CoreCLR.cs
index c74d76388b91..00a8d78685d4 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/RuntimeType.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/RuntimeType.CoreCLR.cs
@@ -2757,7 +2757,12 @@ namespace System
MethodBase? rtTypeMethodBase = GetMethodBase(reflectedType, classRtMethodHandle);
// a class may not implement all the methods of an interface (abstract class) so null is a valid value
Debug.Assert(rtTypeMethodBase is null || rtTypeMethodBase is RuntimeMethodInfo);
- im.TargetMethods[i] = (MethodInfo)rtTypeMethodBase!;
+ RuntimeMethodInfo? targetMethod = (RuntimeMethodInfo?)rtTypeMethodBase;
+ // the TargetMethod provided to us by runtime internals may be a generic method instance,
+ // potentially with invalid arguments. TargetMethods in the InterfaceMap should never be
+ // instances, only definitions.
+ im.TargetMethods[i] = (targetMethod is { IsGenericMethod: true, IsGenericMethodDefinition: false })
+ ? targetMethod.GetGenericMethodDefinition() : targetMethod!;
}
return im;
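
The guard normalizes InterfaceMapping.TargetMethods: if runtime internals hand back a constructed generic method (potentially with invalid type arguments), it is replaced by its generic method definition. A standalone check of the resulting invariant (IFoo/Foo are illustrative types, not from the diff):

    using System;
    using System.Linq;
    using System.Reflection;

    InterfaceMapping map = typeof(Foo).GetInterfaceMap(typeof(IFoo));
    // After the fix, generic targets are definitions, never constructed instances.
    Console.WriteLine(map.TargetMethods.All(
        m => m is null || !m.IsGenericMethod || m.IsGenericMethodDefinition)); // True

    interface IFoo { void M<T>(T value); }
    class Foo : IFoo { public void M<T>(T value) { } }
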
diff --git a/src/coreclr/System.Private.CoreLib/src/System/String.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/String.CoreCLR.cs
index f15ad03d8218..d2785251613e 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/String.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/String.CoreCLR.cs
@@ -39,7 +39,7 @@ namespace System
{
if (len != 0)
{
- Buffer.Memmove(ref *(byte*)dest, ref Unsafe.As<char, byte>(ref src.GetRawStringData()), (nuint)len);
+ SpanHelpers.Memmove(ref *(byte*)dest, ref Unsafe.As<char, byte>(ref src.GetRawStringData()), (nuint)len);
}
}
diff --git a/src/coreclr/System.Private.CoreLib/src/System/StubHelpers.cs b/src/coreclr/System.Private.CoreLib/src/System/StubHelpers.cs
index 9874eef6dc22..81c0dd8e1afe 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/StubHelpers.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/StubHelpers.cs
@@ -103,7 +103,7 @@ namespace System.StubHelpers
// + 1 for the null character from the user. + 1 for the null character we put in.
pbNativeBuffer = (byte*)Marshal.AllocCoTaskMem(nb + 2);
- Buffer.Memmove(ref *pbNativeBuffer, ref MemoryMarshal.GetArrayDataReference(bytes), (nuint)nb);
+ SpanHelpers.Memmove(ref *pbNativeBuffer, ref MemoryMarshal.GetArrayDataReference(bytes), (nuint)nb);
}
}
@@ -360,7 +360,7 @@ namespace System.StubHelpers
Debug.Assert(nbytesused >= 0 && nbytesused < nbytes, "Insufficient buffer allocated in VBByValStrMarshaler.ConvertToNative");
- Buffer.Memmove(ref *pNative, ref MemoryMarshal.GetArrayDataReference(bytes), (nuint)nbytesused);
+ SpanHelpers.Memmove(ref *pNative, ref MemoryMarshal.GetArrayDataReference(bytes), (nuint)nbytesused);
pNative[nbytesused] = 0;
*pLength = nbytesused;
@@ -409,7 +409,7 @@ namespace System.StubHelpers
IntPtr bstr = Marshal.AllocBSTRByteLen(length);
if (bytes != null)
{
- Buffer.Memmove(ref *(byte*)bstr, ref MemoryMarshal.GetArrayDataReference(bytes), length);
+ SpanHelpers.Memmove(ref *(byte*)bstr, ref MemoryMarshal.GetArrayDataReference(bytes), length);
}
return bstr;
@@ -1484,7 +1484,7 @@ namespace System.StubHelpers
}
else
{
- Buffer.Memmove(ref *pNative, ref obj.GetRawData(), size);
+ SpanHelpers.Memmove(ref *pNative, ref obj.GetRawData(), size);
}
}
@@ -1503,7 +1503,7 @@ namespace System.StubHelpers
}
else
{
- Buffer.Memmove(ref obj.GetRawData(), ref *pNative, size);
+ SpanHelpers.Memmove(ref obj.GetRawData(), ref *pNative, size);
}
}
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Threading/WaitHandle.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Threading/WaitHandle.CoreCLR.cs
index b70a688e9d55..142747309218 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Threading/WaitHandle.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Threading/WaitHandle.CoreCLR.cs
@@ -9,7 +9,7 @@ namespace System.Threading
public abstract partial class WaitHandle
{
[MethodImpl(MethodImplOptions.InternalCall)]
- private static extern int WaitOneCore(IntPtr waitHandle, int millisecondsTimeout, bool useTrivialWaits);
+ private static extern int WaitOneCore(IntPtr waitHandle, int millisecondsTimeout);
private static unsafe int WaitMultipleIgnoringSyncContextCore(Span<IntPtr> waitHandles, bool waitAll, int millisecondsTimeout)
{
diff --git a/src/coreclr/System.Private.CoreLib/src/System/ValueType.cs b/src/coreclr/System.Private.CoreLib/src/System/ValueType.cs
index 78301866c36d..f4c3acb31adf 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/ValueType.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/ValueType.cs
@@ -120,7 +120,7 @@ namespace System
else
{
object thisRef = this;
- switch (GetHashCodeStrategy(pMT, ObjectHandleOnStack.Create(ref thisRef), out uint fieldOffset, out uint fieldSize))
+ switch (GetHashCodeStrategy(pMT, ObjectHandleOnStack.Create(ref thisRef), out uint fieldOffset, out uint fieldSize, out MethodTable* fieldMT))
{
case ValueTypeHashCodeStrategy.ReferenceField:
hashCode.Add(Unsafe.As<byte, object>(ref Unsafe.AddByteOffset(ref rawData, fieldOffset)).GetHashCode());
@@ -138,6 +138,12 @@ namespace System
Debug.Assert(fieldSize != 0);
hashCode.AddBytes(MemoryMarshal.CreateReadOnlySpan(ref Unsafe.AddByteOffset(ref rawData, fieldOffset), (int)fieldSize));
break;
+
+ case ValueTypeHashCodeStrategy.ValueTypeOverride:
+ Debug.Assert(fieldMT != null);
+ // Box the field to handle complicated cases like mutable method and shared generic
+ hashCode.Add(RuntimeHelpers.Box(fieldMT, ref Unsafe.AddByteOffset(ref rawData, fieldOffset))?.GetHashCode() ?? 0);
+ break;
}
}
@@ -152,11 +158,12 @@ namespace System
DoubleField,
SingleField,
FastGetHashCode,
+ ValueTypeOverride,
}
[LibraryImport(RuntimeHelpers.QCall, EntryPoint = "ValueType_GetHashCodeStrategy")]
private static unsafe partial ValueTypeHashCodeStrategy GetHashCodeStrategy(
- MethodTable* pMT, ObjectHandleOnStack objHandle, out uint fieldOffset, out uint fieldSize);
+ MethodTable* pMT, ObjectHandleOnStack objHandle, out uint fieldOffset, out uint fieldSize, out MethodTable* fieldMT);
public override string? ToString()
{
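
The new ValueTypeOverride strategy covers a struct whose field's type overrides GetHashCode: the field is boxed through the returned fieldMT so the override dispatches correctly even in mutable or shared-generic cases, instead of the field being hashed as raw bytes. An observable-behavior sketch (Inner/Outer are illustrative types):

    using System;

    // Outer declares no override, so the runtime hashes it field-by-field; the
    // new strategy boxes Inner so its user-defined override is actually invoked.
    Console.WriteLine(default(Outer).GetHashCode());

    struct Inner
    {
        public int Value;
        public override int GetHashCode() => 42;   // must be honored by the wrapper
    }

    struct Outer { public Inner Field; }
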
diff --git a/src/coreclr/classlibnative/bcltype/objectnative.cpp b/src/coreclr/classlibnative/bcltype/objectnative.cpp
index 4622955b44ad..afbda5fad991 100644
--- a/src/coreclr/classlibnative/bcltype/objectnative.cpp
+++ b/src/coreclr/classlibnative/bcltype/objectnative.cpp
@@ -123,48 +123,22 @@ FCIMPL1(INT32, ObjectNative::TryGetHashCode, Object* obj) {
}
FCIMPLEND
-//
-// Compare by ref for normal classes, by value for value types.
-//
-// <TODO>@todo: it would be nice to customize this method based on the
-// defining class rather than doing a runtime check whether it is
-// a value type.</TODO>
-//
-
-FCIMPL2(FC_BOOL_RET, ObjectNative::Equals, Object *pThisRef, Object *pCompareRef)
+FCIMPL2(FC_BOOL_RET, ObjectNative::ContentEquals, Object *pThisRef, Object *pCompareRef)
{
- CONTRACTL
- {
- FCALL_CHECK;
- INJECT_FAULT(FCThrow(kOutOfMemoryException););
- }
- CONTRACTL_END;
-
- if (pThisRef == pCompareRef)
- FC_RETURN_BOOL(TRUE);
+ FCALL_CONTRACT;
- // Since we are in FCALL, we must handle NULL specially.
- if (pThisRef == NULL || pCompareRef == NULL)
- FC_RETURN_BOOL(FALSE);
+ // Should be ensured by caller
+ _ASSERTE(pThisRef != NULL);
+ _ASSERTE(pCompareRef != NULL);
+ _ASSERTE(pThisRef->GetMethodTable() == pCompareRef->GetMethodTable());
MethodTable *pThisMT = pThisRef->GetMethodTable();
- // If it's not a value class, don't compare by value
- if (!pThisMT->IsValueType())
- FC_RETURN_BOOL(FALSE);
-
- // Make sure they are the same type.
- if (pThisMT != pCompareRef->GetMethodTable())
- FC_RETURN_BOOL(FALSE);
-
- // Compare the contents (size - vtable - sync block index).
- DWORD dwBaseSize = pThisMT->GetBaseSize();
- if(pThisMT == g_pStringClass)
- dwBaseSize -= sizeof(WCHAR);
+ // Compare the contents
BOOL ret = memcmp(
- (void *) (pThisRef+1),
- (void *) (pCompareRef+1),
- dwBaseSize - sizeof(Object) - sizeof(int)) == 0;
+ pThisRef->GetData(),
+ pCompareRef->GetData(),
+ pThisMT->GetNumInstanceFieldBytes()) == 0;
FC_GC_POLL_RET();
@@ -215,36 +189,34 @@ FCIMPL1(Object*, ObjectNative::GetClass, Object* pThis)
}
FCIMPLEND
-FCIMPL1(Object*, ObjectNative::AllocateUninitializedClone, Object* pObjUNSAFE)
+extern "C" void QCALLTYPE ObjectNative_AllocateUninitializedClone(QCall::ObjectHandleOnStack objHandle)
{
- FCALL_CONTRACT;
-
- // Delegate error handling to managed side (it will throw NullReferenceException)
- if (pObjUNSAFE == NULL)
- return NULL;
+ QCALL_CONTRACT;
- OBJECTREF refClone = ObjectToOBJECTREF(pObjUNSAFE);
+ BEGIN_QCALL;
- HELPER_METHOD_FRAME_BEGIN_RET_1(refClone);
+ GCX_COOP();
+ OBJECTREF refClone = objHandle.Get();
+ _ASSERTE(refClone != NULL); // Should be handled on the managed side

MethodTable* pMT = refClone->GetMethodTable();
-
+
// assert that String has overloaded the Clone() method
_ASSERTE(pMT != g_pStringClass);
-
- if (pMT->IsArray()) {
- refClone = DupArrayForCloning((BASEARRAYREF)refClone);
- } else {
+
+ if (pMT->IsArray())
+ {
+ objHandle.Set(DupArrayForCloning((BASEARRAYREF)refClone));
+ }
+ else
+ {
// We don't need to call the <cinit> because we know
// that it has been called....(It was called before this was created)
- refClone = AllocateObject(pMT);
+ objHandle.Set(AllocateObject(pMT));
}
- HELPER_METHOD_FRAME_END();
-
- return OBJECTREFToObject(refClone);
+ END_QCALL;
}
-FCIMPLEND
extern "C" BOOL QCALLTYPE Monitor_Wait(QCall::ObjectHandleOnStack pThis, INT32 Timeout)
{
diff --git a/src/coreclr/classlibnative/bcltype/objectnative.h b/src/coreclr/classlibnative/bcltype/objectnative.h
index d8948922dd0b..418fd2561d7c 100644
--- a/src/coreclr/classlibnative/bcltype/objectnative.h
+++ b/src/coreclr/classlibnative/bcltype/objectnative.h
@@ -27,12 +27,12 @@ public:
static FCDECL1(INT32, GetHashCode, Object* vThisRef);
static FCDECL1(INT32, TryGetHashCode, Object* vThisRef);
- static FCDECL2(FC_BOOL_RET, Equals, Object *pThisRef, Object *pCompareRef);
- static FCDECL1(Object*, AllocateUninitializedClone, Object* pObjUNSAFE);
+ static FCDECL2(FC_BOOL_RET, ContentEquals, Object *pThisRef, Object *pCompareRef);
static FCDECL1(Object*, GetClass, Object* pThis);
static FCDECL1(FC_BOOL_RET, IsLockHeld, Object* pThisUNSAFE);
};
+extern "C" void QCALLTYPE ObjectNative_AllocateUninitializedClone(QCall::ObjectHandleOnStack objHandle);
extern "C" BOOL QCALLTYPE Monitor_Wait(QCall::ObjectHandleOnStack pThis, INT32 Timeout);
extern "C" void QCALLTYPE Monitor_Pulse(QCall::ObjectHandleOnStack pThis);
extern "C" void QCALLTYPE Monitor_PulseAll(QCall::ObjectHandleOnStack pThis);
diff --git a/src/coreclr/classlibnative/bcltype/system.cpp b/src/coreclr/classlibnative/bcltype/system.cpp
index ef02743b3669..6e9a9a9ee956 100644
--- a/src/coreclr/classlibnative/bcltype/system.cpp
+++ b/src/coreclr/classlibnative/bcltype/system.cpp
@@ -145,7 +145,7 @@ WCHAR *g_pFailFastBuffer = g_szFailFastBuffer;
// This is the common code for FailFast processing that is wrapped by the two
// FailFast FCalls below.
-void SystemNative::GenericFailFast(STRINGREF refMesgString, EXCEPTIONREF refExceptionForWatsonBucketing, UINT_PTR retAddress, UINT exitCode, STRINGREF refErrorSourceString)
+void SystemNative::GenericFailFast(STRINGREF refMesgString, EXCEPTIONREF refExceptionForWatsonBucketing, UINT_PTR retAddress, STRINGREF refErrorSourceString)
{
CONTRACTL
{
@@ -282,7 +282,7 @@ void SystemNative::GenericFailFast(STRINGREF refMesgString, EXCEPTIONREF refExce
if (gc.refExceptionForWatsonBucketing != NULL)
pThread->SetLastThrownObject(gc.refExceptionForWatsonBucketing);
- EEPolicy::HandleFatalError(exitCode, retAddress, pszMessage, NULL, errorSourceString, argExceptionString);
+ EEPolicy::HandleFatalError(COR_E_FAILFAST, retAddress, pszMessage, NULL, errorSourceString, argExceptionString);
GCPROTECT_END();
}
@@ -301,25 +301,7 @@ FCIMPL1(VOID, SystemNative::FailFast, StringObject* refMessageUNSAFE)
UINT_PTR retaddr = HELPER_METHOD_FRAME_GET_RETURN_ADDRESS();
// Call the actual worker to perform failfast
- GenericFailFast(refMessage, NULL, retaddr, COR_E_FAILFAST, NULL);
-
- HELPER_METHOD_FRAME_END();
-}
-FCIMPLEND
-
-FCIMPL2(VOID, SystemNative::FailFastWithExitCode, StringObject* refMessageUNSAFE, UINT exitCode)
-{
- FCALL_CONTRACT;
-
- STRINGREF refMessage = (STRINGREF)refMessageUNSAFE;
-
- HELPER_METHOD_FRAME_BEGIN_1(refMessage);
-
- // The HelperMethodFrame knows how to get the return address.
- UINT_PTR retaddr = HELPER_METHOD_FRAME_GET_RETURN_ADDRESS();
-
- // Call the actual worker to perform failfast
- GenericFailFast(refMessage, NULL, retaddr, exitCode, NULL);
+ GenericFailFast(refMessage, NULL, retaddr, NULL);
HELPER_METHOD_FRAME_END();
}
@@ -338,7 +320,7 @@ FCIMPL2(VOID, SystemNative::FailFastWithException, StringObject* refMessageUNSAF
UINT_PTR retaddr = HELPER_METHOD_FRAME_GET_RETURN_ADDRESS();
// Call the actual worker to perform failfast
- GenericFailFast(refMessage, refException, retaddr, COR_E_FAILFAST, NULL);
+ GenericFailFast(refMessage, refException, retaddr, NULL);
HELPER_METHOD_FRAME_END();
}
@@ -358,7 +340,7 @@ FCIMPL3(VOID, SystemNative::FailFastWithExceptionAndSource, StringObject* refMes
UINT_PTR retaddr = HELPER_METHOD_FRAME_GET_RETURN_ADDRESS();
// Call the actual worker to perform failfast
- GenericFailFast(refMessage, refException, retaddr, COR_E_FAILFAST, errorSource);
+ GenericFailFast(refMessage, refException, retaddr, errorSource);
HELPER_METHOD_FRAME_END();
}
diff --git a/src/coreclr/classlibnative/bcltype/system.h b/src/coreclr/classlibnative/bcltype/system.h
index b4a773a847c3..e440f1fa8b06 100644
--- a/src/coreclr/classlibnative/bcltype/system.h
+++ b/src/coreclr/classlibnative/bcltype/system.h
@@ -44,7 +44,6 @@ public:
static FCDECL0(INT32, GetExitCode);
static FCDECL1(VOID, FailFast, StringObject* refMessageUNSAFE);
- static FCDECL2(VOID, FailFastWithExitCode, StringObject* refMessageUNSAFE, UINT exitCode);
static FCDECL2(VOID, FailFastWithException, StringObject* refMessageUNSAFE, ExceptionObject* refExceptionUNSAFE);
static FCDECL3(VOID, FailFastWithExceptionAndSource, StringObject* refMessageUNSAFE, ExceptionObject* refExceptionUNSAFE, StringObject* errorSourceUNSAFE);
@@ -55,7 +54,7 @@ public:
private:
// Common processing code for FailFast
- static void GenericFailFast(STRINGREF refMesgString, EXCEPTIONREF refExceptionForWatsonBucketing, UINT_PTR retAddress, UINT exitCode, STRINGREF errorSource);
+ static void GenericFailFast(STRINGREF refMesgString, EXCEPTIONREF refExceptionForWatsonBucketing, UINT_PTR retAddress, STRINGREF errorSource);
};
extern "C" void QCALLTYPE Environment_Exit(INT32 exitcode);
diff --git a/src/coreclr/clrdefinitions.cmake b/src/coreclr/clrdefinitions.cmake
index fb8d095b5606..2097ef5360ef 100644
--- a/src/coreclr/clrdefinitions.cmake
+++ b/src/coreclr/clrdefinitions.cmake
@@ -258,7 +258,7 @@ function(set_target_definitions_to_custom_os_and_arch)
if (TARGETDETAILS_OS STREQUAL "unix_anyos")
target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE TARGET_UNIX_ANYOS)
endif()
- elseif (TARGETDETAILS_OS STREQUAL "win")
+ elseif (TARGETDETAILS_OS MATCHES "^win")
target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE TARGET_WINDOWS)
endif((TARGETDETAILS_OS MATCHES "^unix"))
@@ -287,7 +287,7 @@ function(set_target_definitions_to_custom_os_and_arch)
target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE ARM_SOFTFP)
endif()
- if (NOT (TARGETDETAILS_ARCH STREQUAL "x86") OR (TARGETDETAILS_OS MATCHES "^unix"))
+ if (NOT (TARGETDETAILS_ARCH STREQUAL "x86") OR (TARGETDETAILS_OS MATCHES "^unix") OR (TARGETDETAILS_OS MATCHES "win_aot"))
target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE FEATURE_EH_FUNCLETS)
- endif (NOT (TARGETDETAILS_ARCH STREQUAL "x86") OR (TARGETDETAILS_OS MATCHES "^unix"))
+ endif (NOT (TARGETDETAILS_ARCH STREQUAL "x86") OR (TARGETDETAILS_OS MATCHES "^unix") OR (TARGETDETAILS_OS MATCHES "win_aot"))
endfunction()
diff --git a/src/coreclr/crosscomponents.cmake b/src/coreclr/crosscomponents.cmake
index 11e923805a6e..b06b70607048 100644
--- a/src/coreclr/crosscomponents.cmake
+++ b/src/coreclr/crosscomponents.cmake
@@ -25,6 +25,13 @@ if (CLR_CMAKE_HOST_OS STREQUAL CLR_CMAKE_TARGET_OS OR CLR_CMAKE_TARGET_IOS OR CL
DESTINATIONS .
COMPONENT crosscomponents
)
+ if (CLR_CMAKE_TARGET_ARCH_I386)
+ install_clr (TARGETS
+ clrjit_win_aot_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME}
+ DESTINATIONS .
+ COMPONENT crosscomponents
+ )
+ endif()
endif()
endif()
endif()
diff --git a/src/coreclr/debug/daccess/dacdbiimpl.cpp b/src/coreclr/debug/daccess/dacdbiimpl.cpp
index bc1e6ad84754..b316a0769d3f 100644
--- a/src/coreclr/debug/daccess/dacdbiimpl.cpp
+++ b/src/coreclr/debug/daccess/dacdbiimpl.cpp
@@ -7455,13 +7455,13 @@ HRESULT DacDbiInterfaceImpl::GetILCodeVersionNodeData(VMPTR_ILCodeVersionNode vm
#ifdef FEATURE_REJIT
ILCodeVersion ilCode(vmILCodeVersionNode.GetDacPtr());
pData->m_state = ilCode.GetRejitState();
- pData->m_pbIL = PTR_TO_CORDB_ADDRESS(dac_cast<ULONG_PTR>(ilCode.GetIL()));
+ pData->m_pbIL = PTR_TO_CORDB_ADDRESS(dac_cast<TADDR>(ilCode.GetIL()));
pData->m_dwCodegenFlags = ilCode.GetJitFlags();
const InstrumentedILOffsetMapping* pMapping = ilCode.GetInstrumentedILMap();
if (pMapping)
{
pData->m_cInstrumentedMapEntries = (ULONG)pMapping->GetCount();
- pData->m_rgInstrumentedMapEntries = PTR_TO_CORDB_ADDRESS(dac_cast<ULONG_PTR>(pMapping->GetOffsets()));
+ pData->m_rgInstrumentedMapEntries = PTR_TO_CORDB_ADDRESS(dac_cast<TADDR>(pMapping->GetOffsets()));
}
else
{
diff --git a/src/coreclr/debug/daccess/request.cpp b/src/coreclr/debug/daccess/request.cpp
index 1e0912ea05cd..cc438d1c903c 100644
--- a/src/coreclr/debug/daccess/request.cpp
+++ b/src/coreclr/debug/daccess/request.cpp
@@ -3810,9 +3810,13 @@ ClrDataAccess::GetJumpThunkTarget(T_CONTEXT *ctx, CLRDATA_ADDRESS *targetIP, CLR
#ifdef TARGET_AMD64
SOSDacEnter();
- if (!GetAnyThunkTarget(ctx, targetIP, targetMD))
+ TADDR tempTargetIP, tempTargetMD;
+ if (!GetAnyThunkTarget(ctx, &tempTargetIP, &tempTargetMD))
hr = E_FAIL;
+ *targetIP = TO_CDADDR(tempTargetIP);
+ *targetMD = TO_CDADDR(tempTargetMD);
+
SOSDacLeave();
return hr;
#else
diff --git a/src/coreclr/debug/ee/controller.h b/src/coreclr/debug/ee/controller.h
index a2d8dc2e2602..b838e11c0f85 100644
--- a/src/coreclr/debug/ee/controller.h
+++ b/src/coreclr/debug/ee/controller.h
@@ -827,7 +827,7 @@ public:
DebuggerControllerPatch * GetPatch(PTR_CORDB_ADDRESS_TYPE address)
{
SUPPORTS_DAC;
- ARM_ONLY(_ASSERTE(dac_cast<DWORD>(address) & THUMB_CODE));
+ ARM_ONLY(_ASSERTE(dac_cast<TADDR>(address) & THUMB_CODE));
DebuggerControllerPatch * pPatch =
dac_cast<PTR_DebuggerControllerPatch>(Find(HashAddress(address), (SIZE_T)(dac_cast<TADDR>(address))));
diff --git a/src/coreclr/debug/inc/arm64/primitives.h b/src/coreclr/debug/inc/arm64/primitives.h
index 05c03c7b3094..5f8b5262d993 100644
--- a/src/coreclr/debug/inc/arm64/primitives.h
+++ b/src/coreclr/debug/inc/arm64/primitives.h
@@ -153,9 +153,9 @@ inline void CORDbgSetInstruction(CORDB_ADDRESS_TYPE* address,
#if !defined(DBI_COMPILE) && !defined(DACCESS_COMPILE) && defined(HOST_OSX)
ExecutableWriterHolder<void> instructionWriterHolder((LPVOID)address, sizeof(PRD_TYPE));
- ULONGLONG ptraddr = dac_cast<ULONGLONG>(instructionWriterHolder.GetRW());
+ TADDR ptraddr = dac_cast<TADDR>(instructionWriterHolder.GetRW());
#else // !DBI_COMPILE && !DACCESS_COMPILE && HOST_OSX
- ULONGLONG ptraddr = dac_cast<ULONGLONG>(address);
+ TADDR ptraddr = dac_cast<TADDR>(address);
#endif // !DBI_COMPILE && !DACCESS_COMPILE && HOST_OSX
*(PRD_TYPE *)ptraddr = instruction;
FlushInstructionCache(GetCurrentProcess(),
@@ -167,7 +167,7 @@ inline PRD_TYPE CORDbgGetInstruction(UNALIGNED CORDB_ADDRESS_TYPE* address)
{
LIMITED_METHOD_CONTRACT;
- ULONGLONG ptraddr = dac_cast<ULONGLONG>(address);
+ TADDR ptraddr = dac_cast<TADDR>(address);
return *(PRD_TYPE *)ptraddr;
}
diff --git a/src/coreclr/debug/inc/loongarch64/primitives.h b/src/coreclr/debug/inc/loongarch64/primitives.h
index 97e4fb9541a2..b30e7dcdd2ea 100644
--- a/src/coreclr/debug/inc/loongarch64/primitives.h
+++ b/src/coreclr/debug/inc/loongarch64/primitives.h
@@ -135,7 +135,7 @@ inline void CORDbgSetInstruction(CORDB_ADDRESS_TYPE* address,
// In a DAC build, this function assumes the input is a host address.
LIMITED_METHOD_DAC_CONTRACT;
- ULONGLONG ptraddr = dac_cast<ULONGLONG>(address);
+ TADDR ptraddr = dac_cast<TADDR>(address);
*(PRD_TYPE *)ptraddr = instruction;
FlushInstructionCache(GetCurrentProcess(),
address,
@@ -146,7 +146,7 @@ inline PRD_TYPE CORDbgGetInstruction(UNALIGNED CORDB_ADDRESS_TYPE* address)
{
LIMITED_METHOD_CONTRACT;
- ULONGLONG ptraddr = dac_cast<ULONGLONG>(address);
+ TADDR ptraddr = dac_cast<TADDR>(address);
return *(PRD_TYPE *)ptraddr;
}
diff --git a/src/coreclr/debug/inc/riscv64/primitives.h b/src/coreclr/debug/inc/riscv64/primitives.h
index 066397fcda71..17ace22981c7 100644
--- a/src/coreclr/debug/inc/riscv64/primitives.h
+++ b/src/coreclr/debug/inc/riscv64/primitives.h
@@ -137,7 +137,7 @@ inline void CORDbgSetInstruction(CORDB_ADDRESS_TYPE* address,
// In a DAC build, this function assumes the input is a host address.
LIMITED_METHOD_DAC_CONTRACT;
- ULONGLONG ptraddr = dac_cast<ULONGLONG>(address);
+ TADDR ptraddr = dac_cast<TADDR>(address);
*(PRD_TYPE *)ptraddr = instruction;
FlushInstructionCache(GetCurrentProcess(),
address,
@@ -148,7 +148,7 @@ inline PRD_TYPE CORDbgGetInstruction(UNALIGNED CORDB_ADDRESS_TYPE* address)
{
LIMITED_METHOD_CONTRACT;
- ULONGLONG ptraddr = dac_cast<ULONGLONG>(address);
+ TADDR ptraddr = dac_cast<TADDR>(address);
return *(PRD_TYPE *)ptraddr;
}
diff --git a/src/coreclr/gc/env/common.h b/src/coreclr/gc/env/common.h
index 78562ef0438b..a3f6539aa3a4 100644
--- a/src/coreclr/gc/env/common.h
+++ b/src/coreclr/gc/env/common.h
@@ -22,6 +22,7 @@
#include <stdarg.h>
#include <memory.h>
#include <limits.h>
+#include <math.h>
#include <new>
diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp
index 0471326c0af5..4d281b16251a 100644
--- a/src/coreclr/gc/gc.cpp
+++ b/src/coreclr/gc/gc.cpp
@@ -2367,6 +2367,7 @@ int gc_heap::conserve_mem_setting = 0;
bool gc_heap::spin_count_unit_config_p = false;
uint64_t gc_heap::suspended_start_time = 0;
+uint64_t gc_heap::change_heap_count_time = 0;
uint64_t gc_heap::end_gc_time = 0;
uint64_t gc_heap::total_suspended_time = 0;
uint64_t gc_heap::process_start_time = 0;
@@ -22015,7 +22016,7 @@ void gc_heap::update_end_gc_time_per_heap()
if (heap_number == 0)
{
- dprintf (6666, ("prev gen%d GC end time: prev start %I64d + prev gc elapsed %Id = %I64d",
+ dprintf (3, ("prev gen%d GC end time: prev start %I64d + prev gc elapsed %Id = %I64d",
gen_number, dd_previous_time_clock (dd), dd_gc_elapsed_time (dd), (dd_previous_time_clock (dd) + dd_gc_elapsed_time (dd))));
}
@@ -22023,45 +22024,53 @@ void gc_heap::update_end_gc_time_per_heap()
if (heap_number == 0)
{
- dprintf (6666, ("updated NGC%d %Id elapsed time to %I64d - %I64d = %I64d", gen_number, dd_gc_clock (dd), end_gc_time, dd_time_clock (dd), dd_gc_elapsed_time (dd)));
+ dprintf (3, ("updated NGC%d %Id elapsed time to %I64d - %I64d = %I64d", gen_number, dd_gc_clock (dd), end_gc_time, dd_time_clock (dd), dd_gc_elapsed_time (dd)));
}
}
#ifdef DYNAMIC_HEAP_COUNT
if ((heap_number == 0) && (dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes))
{
- dynamic_heap_count_data_t::sample& sample = dynamic_heap_count_data.samples[dynamic_heap_count_data.sample_index];
- sample.elapsed_between_gcs = end_gc_time - last_suspended_end_time;
- sample.gc_pause_time = dd_gc_elapsed_time (dynamic_data_of (0));
- sample.msl_wait_time = get_msl_wait_time();
+ if (settings.gc_index > 1)
+ {
+ dynamic_heap_count_data_t::sample& sample = dynamic_heap_count_data.samples[dynamic_heap_count_data.sample_index];
+ sample.elapsed_between_gcs = end_gc_time - last_suspended_end_time;
+ sample.gc_pause_time = dd_gc_elapsed_time (dynamic_data_of (0));
+ sample.msl_wait_time = get_msl_wait_time ();
+ // could cache this - we will get it again soon in do_post_gc
+ sample.gc_survived_size = get_total_promoted ();
- dprintf (6666, ("sample#%d: this GC end %I64d - last sus end %I64d = %I64d, this GC pause %I64d, msl wait %I64d",
- dynamic_heap_count_data.sample_index, end_gc_time, last_suspended_end_time, sample.elapsed_between_gcs, sample.gc_pause_time, sample.msl_wait_time));
+ dprintf (6666, ("sample#%d: this GC end %I64d - last sus end %I64d = %I64d, this GC pause %I64d, msl wait %I64d",
+ dynamic_heap_count_data.sample_index, end_gc_time, last_suspended_end_time, sample.elapsed_between_gcs, sample.gc_pause_time, sample.msl_wait_time));
- last_suspended_end_time = end_gc_time;
+ GCEventFireHeapCountSample_V1 (
+ (uint64_t)VolatileLoadWithoutBarrier (&settings.gc_index),
+ sample.elapsed_between_gcs,
+ sample.gc_pause_time,
+ sample.msl_wait_time);
- GCEventFireHeapCountSample_V1 (
- (uint64_t)VolatileLoadWithoutBarrier (&settings.gc_index),
- sample.elapsed_between_gcs,
- sample.gc_pause_time,
- sample.msl_wait_time);
+ dynamic_heap_count_data.sample_index = (dynamic_heap_count_data.sample_index + 1) % dynamic_heap_count_data_t::sample_size;
+ (dynamic_heap_count_data.current_samples_count)++;
- dynamic_heap_count_data.sample_index = (dynamic_heap_count_data.sample_index + 1) % dynamic_heap_count_data_t::sample_size;
+ if (settings.condemned_generation == max_generation)
+ {
+ gc_index_full_gc_end = dd_gc_clock (dynamic_data_of (0));
+ size_t elapsed_between_gen2_gcs = end_gc_time - prev_gen2_end_time;
+ size_t gen2_elapsed_time = sample.gc_pause_time;
+ dynamic_heap_count_data_t::gen2_sample& g2_sample = dynamic_heap_count_data.gen2_samples[dynamic_heap_count_data.gen2_sample_index];
+ g2_sample.gc_index = VolatileLoadWithoutBarrier (&(settings.gc_index));
+ g2_sample.gc_percent = (float)gen2_elapsed_time * 100.0f / elapsed_between_gen2_gcs;
+ (dynamic_heap_count_data.current_gen2_samples_count)++;
- if (settings.condemned_generation == max_generation)
- {
- gc_index_full_gc_end = dd_gc_clock (dynamic_data_of (0));
- size_t elapsed_between_gen2_gcs = end_gc_time - prev_gen2_end_time;
- size_t gen2_elapsed_time = sample.gc_pause_time;
- dynamic_heap_count_data.gen2_gc_percents[dynamic_heap_count_data.gen2_sample_index] = (float)gen2_elapsed_time * 100.0f / elapsed_between_gen2_gcs;
+ dprintf (6666, ("gen2 sample#%d: this GC end %I64d - last gen2 end %I64d = %I64d, GC elapsed %I64d, percent %.3f",
+ dynamic_heap_count_data.gen2_sample_index, end_gc_time, prev_gen2_end_time, elapsed_between_gen2_gcs, gen2_elapsed_time, g2_sample.gc_percent));
+ dynamic_heap_count_data.gen2_sample_index = (dynamic_heap_count_data.gen2_sample_index + 1) % dynamic_heap_count_data_t::sample_size;
+ }
- dprintf (6666, ("gen2 sample#%d: this GC end %I64d - last gen2 end %I64d = %I64d, GC elapsed %I64d, percent %.3f",
- dynamic_heap_count_data.gen2_sample_index, end_gc_time, prev_gen2_end_time, elapsed_between_gen2_gcs,
- gen2_elapsed_time, dynamic_heap_count_data.gen2_gc_percents[dynamic_heap_count_data.gen2_sample_index]));
- dynamic_heap_count_data.gen2_sample_index = (dynamic_heap_count_data.gen2_sample_index + 1) % dynamic_heap_count_data_t::sample_size;
+ calculate_new_heap_count ();
}
- calculate_new_heap_count ();
+ last_suspended_end_time = end_gc_time;
}
#endif //DYNAMIC_HEAP_COUNT
}
@@ -22228,11 +22237,16 @@ void gc_heap::gc1()
dprintf (6666, ("updating BGC %Id elapsed time to %I64d - %I64d = %I64d", dd_gc_clock (dd), end_gc_time, dd_time_clock (dd), dd_gc_elapsed_time (dd)));
float bgc_percent = (float)dd_gc_elapsed_time (dd) * 100.0f / (float)time_since_last_gen2;
- dynamic_heap_count_data.gen2_gc_percents[dynamic_heap_count_data.gen2_sample_index] = bgc_percent;
+ dynamic_heap_count_data_t::gen2_sample& g2_sample = dynamic_heap_count_data.gen2_samples[dynamic_heap_count_data.gen2_sample_index];
+ g2_sample.gc_index = VolatileLoadWithoutBarrier (&(settings.gc_index));
+ g2_sample.gc_percent = bgc_percent;
dprintf (6666, ("gen2 sample %d elapsed %Id * 100 / time inbetween gen2 %Id = %.3f",
dynamic_heap_count_data.gen2_sample_index, dd_gc_elapsed_time (dd), time_since_last_gen2, bgc_percent));
dynamic_heap_count_data.gen2_sample_index = (dynamic_heap_count_data.gen2_sample_index + 1) % dynamic_heap_count_data_t::sample_size;
+ (dynamic_heap_count_data.current_gen2_samples_count)++;
gc_index_full_gc_end = dd_gc_clock (dynamic_data_of (0));
+
+ calculate_new_heap_count ();
}
#endif //DYNAMIC_HEAP_COUNT
@@ -25075,7 +25089,6 @@ void gc_heap::recommission_heap()
// copy some fields from heap0
-
// this is copied to dd_previous_time_clock at the start of GC
dd_time_clock (dd) = dd_time_clock (heap0_dd);
@@ -25152,37 +25165,90 @@ float median_of_3 (float a, float b, float c)
return b;
}
-size_t gc_heap::get_num_completed_gcs ()
+float log_with_base (float x, float base)
{
- size_t num_completed_gcs = settings.gc_index;
-#ifdef BACKGROUND_GC
- if (g_heaps[0]->is_bgc_in_progress ())
+ assert (x > base);
+
+ return (float)(log(x) / log(base));
+}
+
+float mean (float* arr, int size)
+{
+ float sum = 0.0;
+
+ for (int i = 0; i < size; i++)
{
- num_completed_gcs--;
- dprintf (6666, ("BGC in prog, completed GCs -> %Id", num_completed_gcs));
+ sum += arr[i];
}
-#endif //BACKGROUND_GC
+ return (sum / size);
+}
+
+// Set this to the number of times you want the recorded tcps printed.
+int max_times_to_print_tcp = 0;
+
+// Return the slope, and the average values in the avg arg.
+float slope (float* y, int n, float* avg)
+{
+ assert (n > 0);
- return num_completed_gcs;
+ if (n == 1)
+ {
+ dprintf (6666, ("only 1 tcp: %.3f, no slope", y[0]));
+ *avg = y[0];
+ return 0.0;
+ }
+
+ int sum_x = 0;
+
+ for (int i = 0; i < n; i++)
+ {
+ sum_x += i;
+
+ if (max_times_to_print_tcp >= 0)
+ {
+ dprintf (6666, ("%.3f, ", y[i]));
+ }
+ }
+
+ float avg_x = (float)sum_x / n;
+ float avg_y = mean (y, n);
+ *avg = avg_y;
+
+ float numerator = 0.0;
+ float denominator = 0.0;
+
+ for (int i = 0; i < n; ++i)
+ {
+ numerator += ((float)i - avg_x) * (y[i] - avg_y);
+ denominator += ((float)i - avg_x) * (i - avg_x);
+ }
+
+ max_times_to_print_tcp--;
+
+ return (numerator / denominator);
}
int gc_heap::calculate_new_heap_count ()
{
assert (dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes);
- size_t num_completed_gcs = get_num_completed_gcs ();
-
- dprintf (6666, ("current GC %Id(completed: %Id), prev completed GCs %Id, last full GC happened at index %Id",
- VolatileLoadWithoutBarrier (&settings.gc_index), num_completed_gcs, dynamic_heap_count_data.prev_num_completed_gcs, gc_index_full_gc_end));
+ dprintf (6666, ("current num of samples %Id (g2: %Id) prev processed %Id (g2: %Id), last full GC happened at index %Id",
+ dynamic_heap_count_data.current_samples_count, dynamic_heap_count_data.current_gen2_samples_count,
+ dynamic_heap_count_data.processed_samples_count, dynamic_heap_count_data.processed_gen2_samples_count, gc_index_full_gc_end));
- if (num_completed_gcs < (dynamic_heap_count_data.prev_num_completed_gcs + dynamic_heap_count_data_t::sample_size))
+ if ((dynamic_heap_count_data.current_samples_count < (dynamic_heap_count_data.processed_samples_count + dynamic_heap_count_data_t::sample_size)) &&
+ (dynamic_heap_count_data.current_gen2_samples_count < (dynamic_heap_count_data.processed_gen2_samples_count + dynamic_heap_count_data_t::sample_size)))
{
dprintf (6666, ("not enough GCs, skipping"));
return n_heaps;
}
+ bool process_eph_samples_p = (dynamic_heap_count_data.current_samples_count >= (dynamic_heap_count_data.processed_samples_count + dynamic_heap_count_data_t::sample_size));
+ bool process_gen2_samples_p = (dynamic_heap_count_data.current_gen2_samples_count >= (dynamic_heap_count_data.processed_gen2_samples_count + dynamic_heap_count_data_t::sample_size));
+
+ size_t current_gc_index = VolatileLoadWithoutBarrier (&settings.gc_index);
float median_gen2_tcp_percent = 0.0f;
- if (gc_index_full_gc_end >= (settings.gc_index - dynamic_heap_count_data_t::sample_size))
+ if (dynamic_heap_count_data.current_gen2_samples_count >= (dynamic_heap_count_data.processed_gen2_samples_count + dynamic_heap_count_data_t::sample_size))
{
median_gen2_tcp_percent = dynamic_heap_count_data.get_median_gen2_gc_percent ();
}
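
The slope helper added above is an ordinary least-squares fit of the recorded tcp values against their sample index 0..n-1: slope = sum((i - avgX) * (y[i] - avgY)) / sum((i - avgX)^2), with the mean reported through the avg out-parameter. A standalone C# rendering of the same computation:

    using System;
    using System.Linq;

    Console.WriteLine(Slope(new[] { 1f, 2f, 3f }, out float avg)); // 1, avg = 2

    // Least-squares slope of y against x = 0..n-1, mirroring gc_heap's helper.
    static float Slope(float[] y, out float avg)
    {
        avg = y.Average();
        int n = y.Length;
        if (n == 1) return 0f;                  // a single sample has no trend
        float avgX = (n - 1) / 2f;              // mean of 0, 1, ..., n-1
        float num = 0f, den = 0f;
        for (int i = 0; i < n; i++)
        {
            num += (i - avgX) * (y[i] - avg);
            den += (i - avgX) * (i - avgX);
        }
        return num / den;
    }
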
@@ -25202,6 +25268,43 @@ int gc_heap::calculate_new_heap_count ()
}
float median_throughput_cost_percent = median_of_3 (throughput_cost_percents[0], throughput_cost_percents[1], throughput_cost_percents[2]);
+ float avg_throughput_cost_percent = (float)((throughput_cost_percents[0] + throughput_cost_percents[1] + throughput_cost_percents[2]) / 3.0);
+
+ // One of the reasons for outliers is that something temporarily affected GC work. We pick the min tcp if survival is very stable to avoid counting these outliers.
+ float min_tcp = throughput_cost_percents[0];
+ size_t min_survived = dynamic_heap_count_data.samples[0].gc_survived_size;
+ uint64_t min_pause = dynamic_heap_count_data.samples[0].gc_pause_time;
+ for (int i = 1; i < dynamic_heap_count_data_t::sample_size; i++)
+ {
+ min_tcp = min (throughput_cost_percents[i], min_tcp);
+ min_survived = min (dynamic_heap_count_data.samples[i].gc_survived_size, min_survived);
+ min_pause = min (dynamic_heap_count_data.samples[i].gc_pause_time, min_pause);
+ }
+
+ dprintf (6666, ("checking if samples are stable %Id %Id %Id, min tcp %.3f, min pause %I64d",
+ dynamic_heap_count_data.samples[0].gc_survived_size, dynamic_heap_count_data.samples[1].gc_survived_size, dynamic_heap_count_data.samples[2].gc_survived_size,
+ min_tcp, min_pause));
+
+ bool survived_stable_p = true;
+ if (min_survived > 0)
+ {
+ for (int i = 0; i < dynamic_heap_count_data_t::sample_size; i++)
+ {
+ dynamic_heap_count_data_t::sample& sample = dynamic_heap_count_data.samples[i];
+ float diff = (float)(sample.gc_survived_size - min_survived) / (float)min_survived;
+ dprintf (6666, ("sample %d diff from min is %Id -> %.3f", i, (sample.gc_survived_size - min_survived), diff));
+ if (diff >= 0.15)
+ {
+ survived_stable_p = false;
+ }
+ }
+ }
+
+ if (survived_stable_p)
+ {
+ dprintf (6666, ("survived is stable, so we pick min tcp %.3f", min_tcp));
+ median_throughput_cost_percent = min_tcp;
+ }
// apply exponential smoothing and use 1/3 for the smoothing factor
const float smoothing = 3;
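
With smoothing = 3, the update a few lines below this hunk amounts to s' = s + (x - s) / 3: each new median pulls the smoothed estimate a third of the way toward it, so a single outlier sample cannot swing the heap-count decision on its own (and the min-tcp substitution above removes such spikes entirely when survival is stable). A standalone sketch, assuming that conventional update:

    using System;

    // Exponential smoothing with factor 1/3: s' = s + (x - s) / 3.
    float smoothed = 5.0f;
    foreach (float median in new[] { 5f, 20f, 5f })   // middle sample is a spike
    {
        smoothed += (median - smoothed) / 3f;
        Console.WriteLine(smoothed);                  // 5, 10, 8.33...: spike damped
    }
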
@@ -25216,10 +25319,13 @@ int gc_heap::calculate_new_heap_count ()
smoothed_median_throughput_cost_percent = median_throughput_cost_percent;
}
- dprintf (6666, ("median tcp: %.3f, smoothed tcp: %.3f, gen2 tcp %.3f(%.3f, %.3f, %.3f)",
- median_throughput_cost_percent, smoothed_median_throughput_cost_percent, median_gen2_tcp_percent,
- dynamic_heap_count_data.gen2_gc_percents[0], dynamic_heap_count_data.gen2_gc_percents[1], dynamic_heap_count_data.gen2_gc_percents[2]));
+ dprintf (6666, ("median tcp: %.3f, smoothed tcp: %.3f, avg tcp: %.3f, gen2 tcp %.3f(%.3f, %.3f, %.3f)",
+ median_throughput_cost_percent, smoothed_median_throughput_cost_percent, avg_throughput_cost_percent, median_gen2_tcp_percent,
+ dynamic_heap_count_data.gen2_samples[0].gc_percent, dynamic_heap_count_data.gen2_samples[1].gc_percent, dynamic_heap_count_data.gen2_samples[2].gc_percent));
+ //
+ // I'm keeping the old logic for now just to handle gen2.
+ //
size_t heap_size = 0;
for (int i = 0; i < n_heaps; i++)
{
@@ -25247,7 +25353,9 @@ int gc_heap::calculate_new_heap_count ()
// we don't go all the way to the number of CPUs, but stay 1 or 2 short
int step_up = (n_heaps + 1) / 2;
int extra_heaps = 1 + (n_max_heaps >= 32);
- step_up = min (step_up, n_max_heaps - extra_heaps - n_heaps);
+ int actual_n_max_heaps = n_max_heaps - extra_heaps;
+ int max_growth = max ((n_max_heaps / 4), 2);
+ step_up = min (step_up, (actual_n_max_heaps - n_heaps));
// on the way down, we essentially divide the heap count by 1.5
int step_down = (n_heaps + 1) / 3;
@@ -25285,49 +25393,310 @@ int gc_heap::calculate_new_heap_count ()
dprintf (6666, ("stress %d -> %d", n_heaps, new_n_heaps));
#else //STRESS_DYNAMIC_HEAP_COUNT
int new_n_heaps = n_heaps;
- if (median_throughput_cost_percent > 10.0f)
- {
- // ramp up more agressively - use as many heaps as it would take to bring
- // the tcp down to 5%
- new_n_heaps = (int)(n_heaps * (median_throughput_cost_percent / 5.0));
- dprintf (6666, ("[CHP0] tcp %.3f -> %d * %.3f = %d", median_throughput_cost_percent, n_heaps, (median_throughput_cost_percent / 5.0), new_n_heaps));
- new_n_heaps = min (new_n_heaps, n_max_heaps - extra_heaps);
- }
- // if the median tcp is 10% or less, react slower
- else if ((smoothed_median_throughput_cost_percent > 5.0f) || (median_gen2_tcp_percent > 10.0f))
+
+ // target_tcp should be configurable.
+ float target_tcp = 5.0;
+ float target_gen2_tcp = 10.0;
+ float log_base = (float)1.1;
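
`log_with_base` is used below but defined outside the lines shown here; presumably it is the change-of-base form in this sketch. The growth sizing maps the stcp overshoot over target through a base-1.1 log and takes the result as a percentage of the max heap count (numbers hypothetical):

```cpp
#include <cmath>

// Presumed helper: log of x in an arbitrary base, via change of base.
float log_with_base (float x, float base)
{
    return (float)(log (x) / log (base));
}

// Example: stcp = 8, target_tcp = 5, log_base = 1.1:
//   step_up_percent = log_1.1 (8 - 5 + 1.1) = ln (4.1) / ln (1.1) ~= 14.8
// With actual_n_max_heaps = 30, step_up_float ~= 4.4, so grow by 4 heaps.
```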
+
+ dynamic_heap_count_data.add_to_recorded_tcp (median_throughput_cost_percent);
+
+ // This is the average of whatever is in the recorded tcp buffer.
+ float avg_recorded_tcp = 0.0;
+
+ if (process_eph_samples_p)
{
- if (smoothed_median_throughput_cost_percent > 5.0f)
+ dynamic_heap_count_data.last_processed_stcp = smoothed_median_throughput_cost_percent;
+
+ if ((median_throughput_cost_percent > 10.0f) || (smoothed_median_throughput_cost_percent > target_tcp))
{
- dprintf (6666, ("[CHP1] stcp %.3f > 5, %d + %d = %d", smoothed_median_throughput_cost_percent, n_heaps, step_up, (n_heaps + step_up)));
+ // If the median is high but stcp is still below target: should this situation continue, stcp will quickly rise above target anyway; otherwise
+ // we treat the high median as an outlier.
+ if (smoothed_median_throughput_cost_percent > target_tcp)
+ {
+ float step_up_percent = log_with_base ((smoothed_median_throughput_cost_percent - target_tcp + log_base), log_base);
+ float step_up_float = (float)(step_up_percent / 100.0 * actual_n_max_heaps);
+ int step_up_int = (int)step_up_float;
+
+ dprintf (6666, ("[CHP0] inc %d(%.3f), last inc %d, %Id GCs elapsed, last stcp %.3f",
+ step_up_int, step_up_float, (int)dynamic_heap_count_data.last_changed_count,
+ (current_gc_index - dynamic_heap_count_data.last_changed_gc_index), dynamic_heap_count_data.last_changed_stcp));
+
+ // Don't adjust if we just adjusted last time we checked, unless we are in an extreme situation.
+ if ((smoothed_median_throughput_cost_percent < 20.0f) &&
+ (avg_throughput_cost_percent < 20.0f) &&
+ ((current_gc_index - dynamic_heap_count_data.last_changed_gc_index) < (2 * dynamic_heap_count_data_t::sample_size)))
+ {
+ dprintf (6666, ("[CHP0] we just adjusted %Id GCs ago, skipping", (current_gc_index - dynamic_heap_count_data.last_changed_gc_index)));
+ }
+ else
+ {
+ if (step_up_int)
+ {
+ if (dynamic_heap_count_data.dec_failure_count)
+ {
+ dprintf (6666, ("[CHP0] intending to grow, reset dec failure count (was %d)", dynamic_heap_count_data.dec_failure_count));
+ dynamic_heap_count_data.dec_failure_count = 0;
+ }
+
+ if (((int)dynamic_heap_count_data.last_changed_count > 0) && (dynamic_heap_count_data.last_changed_gc_index > 0.0) &&
+ ((current_gc_index - dynamic_heap_count_data.last_changed_gc_index) <= (3 * dynamic_heap_count_data_t::sample_size)))
+ {
+ dprintf (6666, ("[CHP0-0] just grew %d GCs ago, no change", (current_gc_index - dynamic_heap_count_data.last_changed_gc_index)));
+ step_up_int = 0;
+ }
+ else
+ {
+ // If the calculation tells us to grow, we should check whether the slope has been coming down rapidly; if so, there's no reason to grow.
+ int above_target_tcp_count = dynamic_heap_count_data.rearrange_recorded_tcp ();
+ float above_target_tcp_slope = slope (dynamic_heap_count_data.recorded_tcp_rearranged, above_target_tcp_count, &avg_recorded_tcp);
+ float diff_pct = (target_tcp - avg_recorded_tcp) / target_tcp;
+ float adjusted_target_tcp = dynamic_heap_count_data.get_range_upper (target_tcp);
+
+ dprintf (6666, ("[CHP0] slope of last %d samples is %.3f. avg %.3f (%.3f%%), current tcp %.3f, adjusted target is %.3f, failure count is %d",
+ above_target_tcp_count, above_target_tcp_slope, avg_recorded_tcp, (diff_pct * 100.0),
+ median_throughput_cost_percent, adjusted_target_tcp, dynamic_heap_count_data.inc_failure_count));
+
+ if (dynamic_heap_count_data.is_tcp_in_range (diff_pct, above_target_tcp_slope))
+ {
+ step_up_int = 0;
+ dprintf (6666, ("[CHP0-1] slope %.3f and already close to target %.3f (%.3f%%), no change", above_target_tcp_slope, avg_recorded_tcp, (diff_pct * 100.0)));
+ }
+ else
+ {
+ if (above_target_tcp_slope < 0.0)
+ {
+ // If we are already trending down and the tcp is small enough, just wait.
+ if ((median_throughput_cost_percent < adjusted_target_tcp) || (avg_recorded_tcp < adjusted_target_tcp))
+ {
+ step_up_int = 0;
+ dprintf (6666, ("[CHP0-2] trending down, slope is %.3f, tcp is %.3f, avg is %.3f, already below adjusted target %.3f, no change",
+ above_target_tcp_slope, median_throughput_cost_percent, avg_recorded_tcp, adjusted_target_tcp));
+ }
+ }
+ else
+ {
+ // We are trending up, but we have too few samples and the avg is already small enough.
+ if ((above_target_tcp_count <= dynamic_heap_count_data.inc_recheck_threshold) && (avg_recorded_tcp < adjusted_target_tcp))
+ {
+ step_up_int = 0;
+ dprintf (6666, ("[CHP0-3] trending up, only %d samples, slope is %.3f, avg is %.3f already below adjusted target %.3f, no change",
+ above_target_tcp_count, above_target_tcp_slope, avg_recorded_tcp, adjusted_target_tcp));
+ }
+ }
+ }
+ }
+
+ // If we still decided to grow, check if we need to grow more aggressively.
+ if (step_up_int)
+ {
+ if (((int)dynamic_heap_count_data.last_changed_count > 0) && (dynamic_heap_count_data.last_changed_gc_index > 0.0))
+ {
+ (dynamic_heap_count_data.inc_failure_count)++;
+ dprintf (6666, ("[CHP0-4] just grew %d GCs ago, grow more aggressively from %d -> %d more heaps",
+ (current_gc_index - dynamic_heap_count_data.last_changed_gc_index), step_up_int, (step_up_int * (dynamic_heap_count_data.inc_failure_count + 1))));
+ step_up_int *= dynamic_heap_count_data.inc_failure_count + 1;
+ }
+ }
+ }
+
+ step_up_int = min (step_up_int, max_growth);
+
+ new_n_heaps = n_heaps + step_up_int;
+ new_n_heaps = min (new_n_heaps, actual_n_max_heaps);
+
+ // If we are going to grow to be very close to the max heap count, it's better to just grow to it.
+ if ((new_n_heaps < actual_n_max_heaps) && dynamic_heap_count_data.is_close_to_max (new_n_heaps, actual_n_max_heaps))
+ {
+ dprintf (6666, ("[CHP0-5] %d is close to max heaps %d, grow to max", new_n_heaps, actual_n_max_heaps));
+ new_n_heaps = actual_n_max_heaps;
+ }
+
+ if (new_n_heaps > n_heaps)
+ {
+ dynamic_heap_count_data.last_changed_gc_index = current_gc_index;
+ dynamic_heap_count_data.last_changed_count = step_up_float;
+ dynamic_heap_count_data.last_changed_stcp = smoothed_median_throughput_cost_percent;
+ }
+
+ dprintf (6666, ("[CHP0] tcp %.3f, stcp %.3f -> (%d * %.3f%% = %.3f) -> %d + %d = %d -> %d",
+ median_throughput_cost_percent, smoothed_median_throughput_cost_percent,
+ actual_n_max_heaps, step_up_percent, step_up_float, step_up_int, n_heaps, (n_heaps + step_up_int), new_n_heaps));
+ }
+ }
}
else
{
- dprintf (6666, ("[CHP2] tcp %.3f > 10, %d + %d = %d", median_gen2_tcp_percent, n_heaps, step_up, (n_heaps + step_up)));
+ // When we are below target, we accumulate the distance to target and only adjust when we've accumulated enough in this state. Note that
+ // this can include tcp's that are slightly above target, as long as they're not high enough to make us adjust the heap count. If we are just
+ // oscillating around target, this makes those tcp's cancel each other out.
+ if (dynamic_heap_count_data.below_target_accumulation == 0)
+ {
+ dynamic_heap_count_data.first_below_target_gc_index = current_gc_index;
+ dynamic_heap_count_data.init_recorded_tcp ();
+ dynamic_heap_count_data.add_to_recorded_tcp (median_throughput_cost_percent);
+ }
+ dprintf (6666, ("[CHP1] last time adjusted %s by %d at GC#%Id (%Id GCs since), stcp was %.3f, now stcp is %.3f",
+ ((dynamic_heap_count_data.last_changed_count > 0.0) ? "up" : "down"), (int)dynamic_heap_count_data.last_changed_count,
+ dynamic_heap_count_data.last_changed_gc_index, (current_gc_index - dynamic_heap_count_data.last_changed_gc_index),
+ dynamic_heap_count_data.last_changed_stcp, smoothed_median_throughput_cost_percent));
+
+ float below_target_diff = target_tcp - median_throughput_cost_percent;
+ dynamic_heap_count_data.below_target_accumulation += below_target_diff;
+
+ dprintf (6666, ("[CHP1] below target for the past %Id GCs, accumulated %.3f, min (10%% of max is %.2f, 20%% of hc is %.2f)",
+ (current_gc_index - dynamic_heap_count_data.first_below_target_gc_index), dynamic_heap_count_data.below_target_accumulation,
+ (actual_n_max_heaps * 0.1), (n_heaps * 0.2)));
+
+ if (dynamic_heap_count_data.below_target_accumulation >= dynamic_heap_count_data.below_target_threshold)
+ {
+ int below_target_tcp_count = dynamic_heap_count_data.rearrange_recorded_tcp ();
+ float below_target_tcp_slope = slope (dynamic_heap_count_data.recorded_tcp, below_target_tcp_count, &avg_recorded_tcp);
+ float diff_pct = (target_tcp - smoothed_median_throughput_cost_percent) / target_tcp;
+ int step_down_int = (int)(diff_pct / 2.0 * n_heaps);
+ dprintf (6666, ("[CHP1] observed %d tcp's <= or ~ target, avg %.3f, slope %.3f, stcp %.3f below target, shrink by %.3f * %d = %d heaps",
+ below_target_tcp_count, avg_recorded_tcp, below_target_tcp_slope, (diff_pct * 100.0), (diff_pct * 50.0), n_heaps, step_down_int));
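
A worked instance of the sizing in the dprintf above, with hypothetical numbers:

```cpp
// With target_tcp = 5, stcp = 2 and n_heaps = 32:
//   diff_pct      = (5 - 2) / 5 = 0.6          (stcp is 60% below target)
//   step_down_int = (int)(0.6 / 2.0 * 32) = 9  (shrink by half of 60%, i.e. 30% of heaps)
```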
+
+ bool shrink_p = false;
+ if (dynamic_heap_count_data.is_tcp_in_range (diff_pct, below_target_tcp_slope))
+ {
+ step_down_int = 0;
+ dprintf (6666, ("[CHP1-0] slope %.3f is flat and stcp is already close to target %.3f (%.3f%%), no change",
+ below_target_tcp_slope, smoothed_median_throughput_cost_percent, (diff_pct * 100.0)));
+ }
+ else
+ {
+ // If we adjusted last time and it was unsuccessful, we need to increment our failure count.
+ // If we have a nonzero failure count, we don't want to adjust for a while if we continue to be in that same situation.
+ bool last_dec_p = (dynamic_heap_count_data.last_changed_gc_index > 0) && (dynamic_heap_count_data.last_changed_count < 0.0);
+ float last_dec_tcp_diff_pct = (last_dec_p ?
+ ((smoothed_median_throughput_cost_percent - dynamic_heap_count_data.last_changed_stcp) / dynamic_heap_count_data.last_changed_stcp) : 0.0f);
+ bool stable_p = last_dec_p && ((last_dec_tcp_diff_pct <= 0.2) && (last_dec_tcp_diff_pct >= -0.2));
+ dprintf (6666, ("[CHP1] since last adjustment stcp changed %.3f->%.3f = %.3f%%, %s, dec_failure_count is %d",
+ dynamic_heap_count_data.last_changed_stcp, smoothed_median_throughput_cost_percent, (last_dec_tcp_diff_pct * 100.0),
+ (stable_p ? "stable" : "not stable"), dynamic_heap_count_data.dec_failure_count));
+
+ bool check_dec_p = true;
+
+ if (stable_p)
+ {
+ if (dynamic_heap_count_data.dec_failure_count)
+ {
+ (dynamic_heap_count_data.dec_failure_count)++;
+ }
+ else
+ {
+ dynamic_heap_count_data.dec_failure_count = 1;
+ }
+
+ if (dynamic_heap_count_data.dec_failure_count <= dynamic_heap_count_data.dec_failure_recheck_threshold)
+ {
+ check_dec_p = false;
+ dprintf (6666, ("[CHP1-1] dec was still unsuccessful, <= %d, no change", dynamic_heap_count_data.dec_failure_recheck_threshold));
+ }
+ }
+
+ if (check_dec_p)
+ {
+ dynamic_heap_count_data.dec_failure_count = 0;
+
+ if (below_target_tcp_slope <= 0.0)
+ {
+ shrink_p = true;
+ }
+ else
+ {
+ // It's trending upwards, but if it takes too many samples to get to target, we do want to shrink.
+ int num_samples_to_goal = (int)((target_tcp + below_target_tcp_slope - median_throughput_cost_percent) / below_target_tcp_slope);
+ bool far_below_goal_p = (num_samples_to_goal > (3 * dynamic_heap_count_data_t::sample_size));
+ dprintf (6666, ("[CHP1] it'll take ((%.3f + %.3f - %.3f) / %.3f = %d) samples to get to target, %s",
+ target_tcp, below_target_tcp_slope, median_throughput_cost_percent, below_target_tcp_slope,
+ num_samples_to_goal, (far_below_goal_p ? "shrink" : "no change")));
+
+ if (far_below_goal_p)
+ {
+ // We could be in a situation where the slope changes direction, but since we only compute one number, we take another look at
+ // the samples to make a better assessment: we look at the highest tcp's, and if their average is close to target, we don't shrink.
+ //
+ // TODO - we only check this when the slope is going up, but since this includes the situation where the slope changes direction,
+ // we should really be checking this regardless of the slope to handle that case.
+ float highest_avg_tcp = 0.0;
+ int highest_count = dynamic_heap_count_data.highest_avg_recorded_tcp (below_target_tcp_count, avg_recorded_tcp, &highest_avg_tcp);
+ float highest_count_pct = (float)highest_count / (float)below_target_tcp_count;
+
+ shrink_p = (highest_count_pct < 0.3) || (highest_avg_tcp < (target_tcp * 0.8));
+ dprintf (6666, ("[CHP1-2] %d samples were above avg (%.3f%%), their avg is %.3f (%s)",
+ highest_count, (highest_count_pct * 100.0), highest_avg_tcp, (shrink_p ? "shrink" : "no change")));
+ }
+ }
+ }
+ }
+
+ if (shrink_p && step_down_int && (new_n_heaps > step_down_int))
+ {
+ // TODO - if we see that it wants to shrink by 1 heap too many times, we do want to shrink.
+ if (step_down_int == 1)
+ {
+ step_down_int = 0;
+ dprintf (6666, ("[CHP1-3] don't shrink if it's just one heap. not worth it"));
+ }
+
+ new_n_heaps -= step_down_int;
+ dprintf (6666, ("[CHP1] shrink by %d heaps -> %d", step_down_int, new_n_heaps));
+ }
+
+ // Always reinit the buffer as we want to look at the more recent history.
+ dynamic_heap_count_data.init_recorded_tcp ();
+ dynamic_heap_count_data.below_target_accumulation = 0;
+ }
+
+ if (new_n_heaps < n_heaps)
+ {
+ dynamic_heap_count_data.last_changed_gc_index = current_gc_index;
+ dynamic_heap_count_data.last_changed_count = (float)(new_n_heaps - n_heaps);
+ dynamic_heap_count_data.last_changed_stcp = smoothed_median_throughput_cost_percent;
+ dprintf (6666, ("[CHP1] setting last changed gc index to %Id, count to %.3f, stcp to %.3f",
+ dynamic_heap_count_data.last_changed_gc_index, dynamic_heap_count_data.last_changed_count, dynamic_heap_count_data.last_changed_stcp));
+
+ if (dynamic_heap_count_data.inc_failure_count)
+ {
+ dprintf (6666, ("[CHP1] shrink, reset inc failure count (was %d)", dynamic_heap_count_data.inc_failure_count));
+ dynamic_heap_count_data.inc_failure_count = 0;
+ }
+ }
}
- new_n_heaps += step_up;
- }
- // if we can save at least 1% more in time than we spend in space, increase number of heaps
- else if ((tcp_reduction_per_step_up - scp_increase_per_step_up) >= 1.0f)
- {
- dprintf (6666, ("[CHP3] % .3f - % .3f = % .3f, % d + % d = % d",
- tcp_reduction_per_step_up, scp_increase_per_step_up, (tcp_reduction_per_step_up - scp_increase_per_step_up),
- n_heaps, step_up, (n_heaps + step_up)));
- new_n_heaps += step_up;
}
- // if we can save at least 1% more in space than we spend in time, decrease number of heaps
- else if ((smoothed_median_throughput_cost_percent < 1.0f) &&
- (median_gen2_tcp_percent < 5.0f) &&
- ((scp_decrease_per_step_down - tcp_increase_per_step_down) >= 1.0f))
+
+ if ((new_n_heaps == n_heaps) && !process_eph_samples_p && process_gen2_samples_p)
{
- dprintf (6666, ("[CHP4] stcp %.3f tcp %.3f, %.3f - %.3f = %.3f, %d + %d = %d",
- smoothed_median_throughput_cost_percent, median_gen2_tcp_percent,
- scp_decrease_per_step_down, tcp_increase_per_step_down, (scp_decrease_per_step_down - tcp_increase_per_step_down),
- n_heaps, step_up, (n_heaps + step_up)));
- new_n_heaps -= step_down;
+ // The gen2 samples only serve as a backstop so this is quite crude.
+ if (median_gen2_tcp_percent > target_gen2_tcp)
+ {
+ float step_up_percent = log_with_base ((median_gen2_tcp_percent - target_gen2_tcp + log_base), log_base);
+ float step_up_float = (float)(step_up_percent / 100.0 * actual_n_max_heaps);
+ new_n_heaps += (int)step_up_float;
+ new_n_heaps = min (new_n_heaps, actual_n_max_heaps);
+ dprintf (6666, ("[CHP2-0] gen2 tcp: %.3f, inc by %.3f%% = %d, %d -> %d", median_gen2_tcp_percent, step_up_percent, (int)step_up_float, n_heaps, new_n_heaps));
+
+ if ((new_n_heaps < actual_n_max_heaps) && dynamic_heap_count_data.is_close_to_max (new_n_heaps, actual_n_max_heaps))
+ {
+ dprintf (6666, ("[CHP2-1] %d is close to max heaps %d, grow to max", new_n_heaps, actual_n_max_heaps));
+ new_n_heaps = actual_n_max_heaps;
+ }
+ }
+ else if ((dynamic_heap_count_data.last_processed_stcp < 1.0) &&
+ (median_gen2_tcp_percent < (target_gen2_tcp / 2)) &&
+ (scp_decrease_per_step_down - tcp_increase_per_step_down >= 1.0f))
+ {
+ new_n_heaps -= step_down;
+ dprintf (6666, ("[CHP3-0] last eph stcp: %.3f, gen2 tcp: %.3f, dec by %d, %d -> %d",
+ dynamic_heap_count_data.last_processed_stcp, median_gen2_tcp_percent, step_down, n_heaps, new_n_heaps));
+ }
}
assert (new_n_heaps >= 1);
- assert (new_n_heaps <= n_max_heaps);
+ assert (new_n_heaps <= actual_n_max_heaps);
+
#endif //STRESS_DYNAMIC_HEAP_COUNT
// store data used for decision to emit in ETW event
@@ -25350,13 +25719,28 @@ int gc_heap::calculate_new_heap_count ()
dynamic_heap_count_data.scp_decrease_per_step_down
);
- dynamic_heap_count_data.prev_num_completed_gcs = num_completed_gcs;
+ if (process_eph_samples_p)
+ {
+ dprintf (6666, ("processed eph samples, updating processed %Id -> %Id", dynamic_heap_count_data.processed_samples_count, dynamic_heap_count_data.current_samples_count));
+ dynamic_heap_count_data.processed_samples_count = dynamic_heap_count_data.current_samples_count;
+ }
+
+ if (process_gen2_samples_p)
+ {
+ dprintf (6666, ("processed gen2 samples, updating processed %Id -> %Id", dynamic_heap_count_data.processed_gen2_samples_count, dynamic_heap_count_data.current_gen2_samples_count));
+ dynamic_heap_count_data.processed_gen2_samples_count = dynamic_heap_count_data.current_gen2_samples_count;
+ }
if (new_n_heaps != n_heaps)
{
- dprintf (6666, ("should change! %d->%d", n_heaps, new_n_heaps));
+ dprintf (6666, ("GC#%Id should change! %d->%d (%s)",
+ VolatileLoadWithoutBarrier (&settings.gc_index), n_heaps, new_n_heaps, ((n_heaps < new_n_heaps) ? "INC" : "DEC")));
dynamic_heap_count_data.heap_count_to_change_to = new_n_heaps;
dynamic_heap_count_data.should_change_heap_count = true;
+ dynamic_heap_count_data.init_recorded_tcp ();
+ dynamic_heap_count_data.below_target_accumulation = 0;
+ dynamic_heap_count_data.first_below_target_gc_index = current_gc_index;
+ dprintf (6666, ("CHANGING HC, resetting tcp index, below target"));
}
return new_n_heaps;
@@ -25389,7 +25773,7 @@ void gc_heap::check_heap_count ()
if (dynamic_heap_count_data.new_n_heaps != n_heaps)
{
- dprintf (6666, ("prep to change from %d to %d", n_heaps, dynamic_heap_count_data.new_n_heaps));
+ dprintf (6666, ("prep to change from %d to %d at GC#%Id", n_heaps, dynamic_heap_count_data.new_n_heaps, VolatileLoadWithoutBarrier (&settings.gc_index)));
if (!prepare_to_change_heap_count (dynamic_heap_count_data.new_n_heaps))
{
// we don't have sufficient resources - reset the new heap count
@@ -25399,11 +25783,15 @@ void gc_heap::check_heap_count ()
if (dynamic_heap_count_data.new_n_heaps == n_heaps)
{
- // heap count stays the same, no work to do
- dynamic_heap_count_data.prev_num_completed_gcs = get_num_completed_gcs ();
+ dynamic_heap_count_data.last_changed_gc_index = 0;
+ dynamic_heap_count_data.last_changed_count = 0.0;
+
+ dynamic_heap_count_data.processed_samples_count = dynamic_heap_count_data.current_samples_count;
+ dynamic_heap_count_data.processed_gen2_samples_count = dynamic_heap_count_data.current_gen2_samples_count;
dynamic_heap_count_data.should_change_heap_count = false;
- dprintf (6666, ("heap count stays the same %d, no work to do, set prev completed to %Id", dynamic_heap_count_data.new_n_heaps, dynamic_heap_count_data.prev_num_completed_gcs));
+ dprintf (6666, ("heap count stays the same %d, no work to do, set processed sample count to %Id",
+ dynamic_heap_count_data.new_n_heaps, dynamic_heap_count_data.current_samples_count));
return;
}
@@ -25443,17 +25831,14 @@ void gc_heap::check_heap_count ()
int old_n_heaps = n_heaps;
- (dynamic_heap_count_data.heap_count_change_count)++;
change_heap_count (dynamic_heap_count_data.new_n_heaps);
GCToEEInterface::RestartEE(TRUE);
dprintf (9999, ("h0 restarted EE"));
- // we made changes to the heap count that will change the overhead,
- // so change the smoothed overhead to reflect that
- dynamic_heap_count_data.smoothed_median_throughput_cost_percent = dynamic_heap_count_data.smoothed_median_throughput_cost_percent / n_heaps * old_n_heaps;
+ dynamic_heap_count_data.smoothed_median_throughput_cost_percent = 0.0;
- dprintf (6666, ("h0 finished changing, set should change to false!"));
+ dprintf (6666, ("h0 finished changing, set should change to false!\n"));
dynamic_heap_count_data.should_change_heap_count = false;
}
@@ -25593,6 +25978,8 @@ bool gc_heap::prepare_to_change_heap_count (int new_n_heaps)
bool gc_heap::change_heap_count (int new_n_heaps)
{
+ uint64_t start_time = 0;
+
dprintf (9999, ("BEG heap%d changing %d->%d", heap_number, n_heaps, new_n_heaps));
// use this variable for clarity - n_heaps will change during the transition
@@ -25617,11 +26004,9 @@ bool gc_heap::change_heap_count (int new_n_heaps)
assert (dynamic_heap_count_data.new_n_heaps != old_n_heaps);
- dprintf (9999, ("Waiting h0 heap%d changing %d->%d", heap_number, n_heaps, new_n_heaps));
-
if (heap_number == 0)
{
- dprintf (3, ("switching heap count from %d to %d heaps", old_n_heaps, new_n_heaps));
+ start_time = GetHighPrecisionTimeStamp ();
// spread finalization data out to heaps coming into service
// if this step fails, we can still continue
@@ -25827,6 +26212,7 @@ bool gc_heap::change_heap_count (int new_n_heaps)
gc_t_join.restart ();
}
}
+
#ifdef BACKGROUND_GC
// there should be no items in the bgc_alloc_lock
bgc_alloc_lock->check();
@@ -25837,23 +26223,31 @@ bool gc_heap::change_heap_count (int new_n_heaps)
{
// compute the total budget per generation over the old heaps
// and figure out what the new budget per heap is
- ptrdiff_t budget_per_heap[total_generation_count];
+ ptrdiff_t new_alloc_per_heap[total_generation_count];
+ size_t desired_alloc_per_heap[total_generation_count];
for (int gen_idx = 0; gen_idx < total_generation_count; gen_idx++)
{
- ptrdiff_t total_budget = 0;
+ ptrdiff_t total_new_alloc = 0;
+ size_t total_desired_alloc = 0;
for (int i = 0; i < old_n_heaps; i++)
{
gc_heap* hp = g_heaps[i];
dynamic_data* dd = hp->dynamic_data_of (gen_idx);
- total_budget += dd_new_allocation (dd);
+ total_new_alloc += dd_new_allocation (dd);
+ total_desired_alloc += dd_desired_allocation (dd);
}
// distribute the total budget for this generation over all new heaps if we are increasing heap count,
// but keep the budget per heap if we are decreasing heap count
int max_n_heaps = max (old_n_heaps, new_n_heaps);
- budget_per_heap[gen_idx] = Align (total_budget/max_n_heaps, get_alignment_constant (gen_idx <= max_generation));
-
- dprintf (6666, ("g%d: total budget: %zd budget per heap: %zd", gen_idx, total_budget, budget_per_heap[gen_idx]));
+ new_alloc_per_heap[gen_idx] = Align (total_new_alloc / max_n_heaps, get_alignment_constant (gen_idx <= max_generation));
+ desired_alloc_per_heap[gen_idx] = Align (total_desired_alloc / max_n_heaps, get_alignment_constant (gen_idx <= max_generation));
+ size_t allocated_in_budget = total_desired_alloc - total_new_alloc;
+ dprintf (6666, ("g%d: total budget %zd (%zd / heap), left in budget: %zd (%zd / heap), (allocated %Id, %.3f%%), min %zd",
+ gen_idx, total_desired_alloc, desired_alloc_per_heap[gen_idx],
+ total_new_alloc, new_alloc_per_heap[gen_idx],
+ allocated_in_budget, ((double)allocated_in_budget * 100.0 / (double)total_desired_alloc),
+ dd_min_size (g_heaps[0]->dynamic_data_of (gen_idx))));
}
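
A worked instance of the redistribution above (hypothetical sizes, alignment ignored):

```cpp
// Growing 4 -> 8 heaps with gen0 totals of 96MB desired and 40MB left unconsumed:
//   desired_alloc_per_heap[0] = 96MB / max (4, 8) = 12MB
//   new_alloc_per_heap[0]     = 40MB / max (4, 8) =  5MB
// Shrinking 8 -> 4 divides by max (8, 4) = 8, which keeps the per-heap budget
// at its old level instead of doubling it onto the surviving heaps.
```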
// distribute the new budget per heap over the new heaps
@@ -25864,10 +26258,10 @@ bool gc_heap::change_heap_count (int new_n_heaps)
for (int gen_idx = 0; gen_idx < total_generation_count; gen_idx++)
{
- // distribute the total budget over all heaps, but don't go below the min budget
+ // distribute the total leftover budget over all heaps.
dynamic_data* dd = hp->dynamic_data_of (gen_idx);
- dd_new_allocation (dd) = max (budget_per_heap[gen_idx], (ptrdiff_t)dd_min_size (dd));
- dd_desired_allocation (dd) = dd_new_allocation (dd);
+ dd_new_allocation (dd) = new_alloc_per_heap[gen_idx];
+ dd_desired_allocation (dd) = max (desired_alloc_per_heap[gen_idx], dd_min_size (dd));
// recompute dd_fragmentation and dd_current_size
generation* gen = hp->generation_of (gen_idx);
@@ -25876,10 +26270,11 @@ bool gc_heap::change_heap_count (int new_n_heaps)
assert (gen_size >= dd_fragmentation (dd));
dd_current_size (dd) = gen_size - dd_fragmentation (dd);
- dprintf (6666, ("h%d g%d: new allocation: %zd generation_size: %zd fragmentation: %zd current_size: %zd",
+ dprintf (3, ("h%d g%d: budget: %zd, left in budget: %zd, %zd generation_size: %zd fragmentation: %zd current_size: %zd",
i,
gen_idx,
- dd_new_allocation (dd),
+ desired_alloc_per_heap[gen_idx],
+ new_alloc_per_heap[gen_idx],
gen_size,
dd_fragmentation (dd),
dd_current_size (dd)));
@@ -25916,6 +26311,11 @@ bool gc_heap::change_heap_count (int new_n_heaps)
}
}
+ if (heap_number == 0)
+ {
+ change_heap_count_time = GetHighPrecisionTimeStamp() - start_time;
+ }
+
return true;
}
@@ -48405,6 +48805,10 @@ HRESULT GCHeap::Initialize()
// This needs to be different from our initial heap count so we can make sure we wait for
// the idle threads correctly in gc_thread_function.
gc_heap::dynamic_heap_count_data.last_n_heaps = 0;
+ // This should be adjusted based on the target tcp. See comments in gcpriv.h
+ gc_heap::dynamic_heap_count_data.below_target_threshold = 10.0;
+ gc_heap::dynamic_heap_count_data.inc_recheck_threshold = 5;
+ gc_heap::dynamic_heap_count_data.dec_failure_recheck_threshold = 5;
}
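
To make `below_target_threshold` concrete (hypothetical tcp stream; the target of 5 comes from `calculate_new_heap_count` above):

```cpp
// With target_tcp = 5 and below_target_threshold = 10, a steady median tcp of 3
// accumulates (5 - 3) = 2 per check, so a shrink is first considered after 5
// consecutive below-target checks; samples slightly above target subtract from
// the accumulation and push that point further out.
```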
#endif //DYNAMIC_HEAP_COUNT
GCScan::GcRuntimeStructuresValid (TRUE);
diff --git a/src/coreclr/gc/gcpriv.h b/src/coreclr/gc/gcpriv.h
index 71dc19b9f0c6..788cbff9f5e5 100644
--- a/src/coreclr/gc/gcpriv.h
+++ b/src/coreclr/gc/gcpriv.h
@@ -2556,8 +2556,6 @@ private:
// re-initialize a heap in preparation to putting it back into service
PER_HEAP_METHOD void recommission_heap();
- PER_HEAP_ISOLATED_METHOD size_t get_num_completed_gcs();
-
PER_HEAP_ISOLATED_METHOD int calculate_new_heap_count();
// check if we should change the heap count
@@ -4238,21 +4236,166 @@ private:
struct dynamic_heap_count_data_t
{
static const int sample_size = 3;
+ static const int recorded_tcp_array_size = 64;
struct sample
{
uint64_t elapsed_between_gcs; // time between gcs in microseconds (this should really be between_pauses)
uint64_t gc_pause_time; // pause time for this GC
uint64_t msl_wait_time;
+ size_t gc_survived_size;
};
uint32_t sample_index;
sample samples[sample_size];
- size_t prev_num_completed_gcs;
+
+ size_t current_samples_count;
+ size_t processed_samples_count;
+
+ //
+ // We need to observe the history of tcp's, so we record them in a small buffer.
+ //
+ float recorded_tcp_rearranged[recorded_tcp_array_size];
+ float recorded_tcp[recorded_tcp_array_size];
+ int recorded_tcp_index;
+ int total_recorded_tcp;
+
+ int add_to_recorded_tcp (float tcp)
+ {
+ total_recorded_tcp++;
+
+ recorded_tcp[recorded_tcp_index] = tcp;
+ recorded_tcp_index++;
+ if (recorded_tcp_index == recorded_tcp_array_size)
+ {
+ recorded_tcp_index = 0;
+ }
+
+ return recorded_tcp_index;
+ }
+
+ int rearrange_recorded_tcp ()
+ {
+ int count = recorded_tcp_array_size;
+ int copied_count = 0;
+
+ if (total_recorded_tcp >= recorded_tcp_array_size)
+ {
+ int earlier_entry_size = recorded_tcp_array_size - recorded_tcp_index;
+ memcpy (recorded_tcp_rearranged, (recorded_tcp + recorded_tcp_index), (earlier_entry_size * sizeof (float)));
+
+ copied_count = earlier_entry_size;
+ }
+
+ if (recorded_tcp_index)
+ {
+ memcpy ((recorded_tcp_rearranged + copied_count), recorded_tcp, (recorded_tcp_index * sizeof (float)));
+ copied_count += recorded_tcp_index;
+ }
+
+ return copied_count;
+ }
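
A hypothetical driver for the two functions above, showing the wraparound and how rearrangement restores chronological order:

```cpp
dynamic_heap_count_data_t data = {};
data.init_recorded_tcp ();
for (int i = 0; i < 70; i++)
{
    data.add_to_recorded_tcp ((float)i); // entries 64..69 overwrite slots 0..5
}
// total_recorded_tcp == 70 (>= 64) and recorded_tcp_index == 6, so rearrange
// copies slots [6..63] (oldest values 6..63) first, then slots [0..5]
// (values 64..69), yielding all 64 retained tcp's in chronological order.
int count = data.rearrange_recorded_tcp (); // count == 64
```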
+
+ int highest_avg_recorded_tcp (int count, float avg, float* highest_avg)
+ {
+ float highest_sum = 0.0;
+ int highest_count = 0;
+
+ for (int i = 0; i < count; i++)
+ {
+ if (recorded_tcp_rearranged[i] > avg)
+ {
+ highest_count++;
+ highest_sum += recorded_tcp_rearranged[i];
+ }
+ }
+
+ if (highest_count)
+ {
+ *highest_avg = highest_sum / highest_count;
+ }
+
+ return highest_count;
+ }
+
+ void init_recorded_tcp ()
+ {
+ total_recorded_tcp = 0;
+ recorded_tcp_index = 0;
+ dprintf (6666, ("INIT tcp buffer"));
+ }
+
+ int get_recorded_tcp_count () { return total_recorded_tcp; }
+
+ //
+ // Maintain some info about the last time we changed the heap count.
+ //
+ size_t last_changed_gc_index;
+ // This is intentionally kept as a float for precision.
+ float last_changed_count;
+ float last_changed_stcp;
+
+ //
+ // For tuning above/below target tcp.
+ //
+ // If we just increased the heap count and immediately need to grow again, that counts as a failure.
+ // The higher the failure count, the more aggressively we should grow.
+ int inc_failure_count;
+
+ // If we are trending up and the tcp is already close enough to target, we need this many samples
+ // before we adjust.
+ int inc_recheck_threshold;
+
+ // If we shrink and the stcp doesn't change much, that counts as a failure. For the below-target case
+ // it's fine to stay here for a while: either the tcp will naturally change and break out of this situation,
+ // or we wait for a while before we re-evaluate. How long we wait is defined by dec_failure_recheck_threshold
+ // each time our calculation tells us to shrink.
+ int dec_failure_count;
+ int dec_failure_recheck_threshold;
+
+ // If we continue to be below target for an extended period of time, i.e., we've accumulated more than
+ // below_target_threshold, we want to reduce the heap count.
+ float below_target_accumulation;
+ float below_target_threshold;
+
+ // Currently only used for dprintf.
+ size_t first_below_target_gc_index;
+
+ float get_range_upper (float t)
+ {
+ return (t * 1.2f);
+ }
+
+ bool is_tcp_in_range (float diff_pct, float slope)
+ {
+ return ((diff_pct <= 0.2) && (diff_pct >= -0.2) && (slope <= 0.1) && (slope >= -0.1));
+ }
+
+ bool is_close_to_max (int new_n, int max)
+ {
+ return ((max - new_n) <= (max / 10));
+ }
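
Worked instances of the three small predicates above (values hypothetical):

```cpp
// get_range_upper (5.0f)        -> 6.0f: tcp's up to 20% above target still count as at-target.
// is_tcp_in_range (0.1f, 0.05f) -> true: within 20% of target with a slope within +/-0.1 is flat.
// is_close_to_max (19, 20)      -> true: within max/10 = 2 heaps of max, so grow all the way.
```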
+
+ //
+ // gen2 GCs are handled separately only as a backstop.
+ //
+ struct gen2_sample
+ {
+ // We record the gen2 GC indices so we know how far apart they are. Currently unused,
+ // but we should consider how much value these samples have if they are very far apart.
+ size_t gc_index;
+ // This is (gc_elapsed_time / time in between this and the last gen2 GC)
+ float gc_percent;
+ };
uint32_t gen2_sample_index;
- // This is (gc_elapsed_time / time inbetween this and the last gen2 GC)
- float gen2_gc_percents[sample_size];
+ gen2_sample gen2_samples[sample_size];
+
+ size_t current_gen2_samples_count;
+ size_t processed_gen2_samples_count;
+
+ // This records the stcp the last time we processed ephemeral samples. We use it as a backstop check when we process gen2 samples.
+ float last_processed_stcp;
float median_throughput_cost_percent; // estimated overhead of allocator + gc
float smoothed_median_throughput_cost_percent; // exponentially smoothed version
@@ -4271,14 +4414,13 @@ private:
bool should_change_heap_count;
int heap_count_to_change_to;
- int heap_count_change_count;
#ifdef STRESS_DYNAMIC_HEAP_COUNT
int lowest_heap_with_msl_uoh;
#endif //STRESS_DYNAMIC_HEAP_COUNT
float get_median_gen2_gc_percent()
{
- return median_of_3 (gen2_gc_percents[0], gen2_gc_percents[1], gen2_gc_percents[2]);
+ return median_of_3 (gen2_samples[0].gc_percent, gen2_samples[1].gc_percent, gen2_samples[2].gc_percent);
}
};
PER_HEAP_ISOLATED_FIELD_MAINTAINED dynamic_heap_count_data_t dynamic_heap_count_data;
@@ -4475,6 +4617,9 @@ private:
// at the beginning of a BGC and the PM triggered full GCs
// fall into this case.
PER_HEAP_ISOLATED_FIELD_DIAG_ONLY uint64_t suspended_start_time;
+ // Right now this is diag only but may be used functionally later.
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY uint64_t change_heap_count_time;
+ // TEMP END
PER_HEAP_ISOLATED_FIELD_DIAG_ONLY uint64_t end_gc_time;
PER_HEAP_ISOLATED_FIELD_DIAG_ONLY uint64_t total_suspended_time;
PER_HEAP_ISOLATED_FIELD_DIAG_ONLY uint64_t process_start_time;
diff --git a/src/coreclr/inc/check.h b/src/coreclr/inc/check.h
index c1ac08016d83..6951e2a41837 100644
--- a/src/coreclr/inc/check.h
+++ b/src/coreclr/inc/check.h
@@ -684,6 +684,9 @@ CHECK CheckAligned(UINT value, UINT alignment);
CHECK CheckAligned(ULONG value, UINT alignment);
#endif
CHECK CheckAligned(UINT64 value, UINT alignment);
+#ifdef __APPLE__
+CHECK CheckAligned(SIZE_T value, UINT alignment);
+#endif
CHECK CheckAligned(const void *address, UINT alignment);
CHECK CheckOverflow(UINT value1, UINT value2);
@@ -691,6 +694,9 @@ CHECK CheckOverflow(UINT value1, UINT value2);
CHECK CheckOverflow(ULONG value1, ULONG value2);
#endif
CHECK CheckOverflow(UINT64 value1, UINT64 value2);
+#ifdef __APPLE__
+CHECK CheckOverflow(SIZE_T value1, SIZE_T value2);
+#endif
CHECK CheckOverflow(PTR_CVOID address, UINT offset);
#if defined(_MSC_VER)
CHECK CheckOverflow(const void *address, ULONG offset);
@@ -702,11 +708,17 @@ CHECK CheckUnderflow(UINT value1, UINT value2);
CHECK CheckUnderflow(ULONG value1, ULONG value2);
#endif
CHECK CheckUnderflow(UINT64 value1, UINT64 value2);
+#ifdef __APPLE__
+CHECK CheckUnderflow(SIZE_T value1, SIZE_T value2);
+#endif
CHECK CheckUnderflow(const void *address, UINT offset);
#if defined(_MSC_VER)
CHECK CheckUnderflow(const void *address, ULONG offset);
#endif
CHECK CheckUnderflow(const void *address, UINT64 offset);
+#ifdef __APPLE__
+CHECK CheckUnderflow(const void *address, SIZE_T offset);
+#endif
CHECK CheckUnderflow(const void *address, void *address2);
CHECK CheckZeroedMemory(const void *memory, SIZE_T size);
diff --git a/src/coreclr/inc/check.inl b/src/coreclr/inc/check.inl
index 9296c48f7a7a..34a2956d1be6 100644
--- a/src/coreclr/inc/check.inl
+++ b/src/coreclr/inc/check.inl
@@ -156,6 +156,15 @@ inline CHECK CheckAligned(UINT64 value, UINT alignment)
CHECK_OK;
}
+#ifdef __APPLE__
+inline CHECK CheckAligned(SIZE_T value, UINT alignment)
+{
+ STATIC_CONTRACT_WRAPPER;
+ CHECK(AlignmentTrim(value, alignment) == 0);
+ CHECK_OK;
+}
+#endif
+
inline CHECK CheckAligned(const void *address, UINT alignment)
{
STATIC_CONTRACT_WRAPPER;
@@ -183,6 +192,14 @@ inline CHECK CheckOverflow(UINT64 value1, UINT64 value2)
CHECK_OK;
}
+#ifdef __APPLE__
+inline CHECK CheckOverflow(SIZE_T value1, SIZE_T value2)
+{
+ CHECK(value1 + value2 >= value1);
+ CHECK_OK;
+}
+#endif
+
inline CHECK CheckOverflow(PTR_CVOID address, UINT offset)
{
TADDR targetAddr = dac_cast<TADDR>(address);
@@ -254,6 +271,15 @@ inline CHECK CheckUnderflow(UINT64 value1, UINT64 value2)
CHECK_OK;
}
+#ifdef __APPLE__
+inline CHECK CheckUnderflow(SIZE_T value1, SIZE_T value2)
+{
+ CHECK(value1 - value2 <= value1);
+
+ CHECK_OK;
+}
+#endif
+
inline CHECK CheckUnderflow(const void *address, UINT offset)
{
#if POINTER_BITS == 32
@@ -290,6 +316,20 @@ inline CHECK CheckUnderflow(const void *address, UINT64 offset)
CHECK_OK;
}
+#ifdef __APPLE__
+inline CHECK CheckUnderflow(const void *address, SIZE_T offset)
+{
+#if POINTER_BITS == 32
+ CHECK(offset >> 32 == 0);
+ CHECK((UINT) (SIZE_T) address - (UINT) offset <= (UINT) (SIZE_T) address);
+#else
+ CHECK((UINT64) address - offset <= (UINT64) address);
+#endif
+
+ CHECK_OK;
+}
+#endif
+
inline CHECK CheckUnderflow(const void *address, void *address2)
{
#if POINTER_BITS == 32
diff --git a/src/coreclr/inc/clr_std/type_traits b/src/coreclr/inc/clr_std/type_traits
index 12af99d5c4fe..ba007c32d9fe 100644
--- a/src/coreclr/inc/clr_std/type_traits
+++ b/src/coreclr/inc/clr_std/type_traits
@@ -358,7 +358,7 @@ namespace std
// On Unix 'long' is a 64-bit type (same as __int64) and the following two definitions
// conflict with _Is_integral<unsigned __int64> and _Is_integral<signed __int64>.
-#ifndef HOST_UNIX
+#if !defined(HOST_UNIX) || defined(__APPLE__)
template<>
struct _Is_integral<unsigned long>
: true_type
@@ -370,7 +370,7 @@ namespace std
: true_type
{ // determine whether _Ty is integral
};
-#endif /* HOST_UNIX */
+#endif /* !HOST_UNIX || __APPLE__ */
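
This hunk and the SIZE_T overloads added to check.h/check.inl above exist for the same reason, summarized here as a sketch (my phrasing, not from the diff): on macOS `unsigned long` and `unsigned long long` are distinct 64-bit types, so the existing UINT64 overloads and `_Is_integral<unsigned __int64>` specializations don't cover SIZE_T.

```cpp
#include <type_traits>

// On macOS, SIZE_T is 'unsigned long' while UINT64 is 'unsigned long long':
// same width, distinct types, so each needs its own overload/specialization.
static_assert (sizeof (unsigned long) == sizeof (unsigned long long), "both 64-bit");
static_assert (!std::is_same<unsigned long, unsigned long long>::value, "distinct types");
```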
#if _HAS_CHAR16_T_LANGUAGE_SUPPORT
template<>
diff --git a/src/coreclr/inc/clrconfignocache.h b/src/coreclr/inc/clrconfignocache.h
index f75504a2289a..01675a24201d 100644
--- a/src/coreclr/inc/clrconfignocache.h
+++ b/src/coreclr/inc/clrconfignocache.h
@@ -46,6 +46,8 @@ public:
{
return false;
}
+
+ result = (DWORD)rawResult;
bool fSuccess = endPtr != _value;
return fSuccess;
}
diff --git a/src/coreclr/inc/clrconfigvalues.h b/src/coreclr/inc/clrconfigvalues.h
index 30956bf4a674..d9571f077645 100644
--- a/src/coreclr/inc/clrconfigvalues.h
+++ b/src/coreclr/inc/clrconfigvalues.h
@@ -259,7 +259,7 @@ RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_legacyCorruptedStateExceptionsPolicy, W("le
CONFIG_DWORD_INFO(INTERNAL_SuppressLostExceptionTypeAssert, W("SuppressLostExceptionTypeAssert"), 0, "")
RETAIL_CONFIG_DWORD_INFO(INTERNAL_UseEntryPointFilter, W("UseEntryPointFilter"), 0, "")
RETAIL_CONFIG_DWORD_INFO(INTERNAL_Corhost_Swallow_Uncaught_Exceptions, W("Corhost_Swallow_Uncaught_Exceptions"), 0, "")
-RETAIL_CONFIG_DWORD_INFO(EXTERNAL_EnableNewExceptionHandling, W("EnableNewExceptionHandling"), 0, "Enable new exception handling.");
+RETAIL_CONFIG_DWORD_INFO(EXTERNAL_LegacyExceptionHandling, W("LegacyExceptionHandling"), 1, "Enable legacy exception handling.");
///
diff --git a/src/coreclr/inc/clrnt.h b/src/coreclr/inc/clrnt.h
index 56245ea46f25..137375386280 100644
--- a/src/coreclr/inc/clrnt.h
+++ b/src/coreclr/inc/clrnt.h
@@ -184,12 +184,17 @@ RtlVirtualUnwind_Unsafe(
#ifdef HOST_X86
typedef struct _RUNTIME_FUNCTION {
DWORD BeginAddress;
+ // NOTE: R2R doesn't include EndAddress (see docs/design/coreclr/botr/readytorun-format.md).
+ // NativeAOT does include the EndAddress because the Microsoft linker expects it. In NativeAOT
+ // the info is generated in the managed ObjectWriter, so the structures don't have to match.
+ // DWORD EndAddress;
DWORD UnwindData;
} RUNTIME_FUNCTION, *PRUNTIME_FUNCTION;
typedef struct _DISPATCHER_CONTEXT {
_EXCEPTION_REGISTRATION_RECORD* RegistrationPointer;
} DISPATCHER_CONTEXT, *PDISPATCHER_CONTEXT;
+
#endif // HOST_X86
#endif // !HOST_UNIX
@@ -207,7 +212,7 @@ RtlpGetFunctionEndAddress (
_In_ TADDR ImageBase
)
{
- PTR_UNWIND_INFO pUnwindInfo = (PTR_UNWIND_INFO)(ImageBase + FunctionEntry->UnwindData);
+ PUNWIND_INFO pUnwindInfo = (PUNWIND_INFO)(ImageBase + FunctionEntry->UnwindData);
return FunctionEntry->BeginAddress + pUnwindInfo->FunctionLength;
}
diff --git a/src/coreclr/inc/clrtypes.h b/src/coreclr/inc/clrtypes.h
index 19e9720b34d9..9094e4932a25 100644
--- a/src/coreclr/inc/clrtypes.h
+++ b/src/coreclr/inc/clrtypes.h
@@ -370,6 +370,15 @@ inline UINT64 AlignDown(UINT64 value, UINT alignment)
return (value&~(UINT64)(alignment-1));
}
+#ifdef __APPLE__
+inline SIZE_T AlignDown(SIZE_T value, UINT alignment)
+{
+ STATIC_CONTRACT_LEAF;
+ STATIC_CONTRACT_SUPPORTS_DAC;
+ return (value&~(SIZE_T)(alignment-1));
+}
+#endif // __APPLE__
+
inline UINT AlignmentPad(UINT value, UINT alignment)
{
STATIC_CONTRACT_WRAPPER;
diff --git a/src/coreclr/inc/corinfo.h b/src/coreclr/inc/corinfo.h
index 111c6488df07..5fad5e4b2429 100644
--- a/src/coreclr/inc/corinfo.h
+++ b/src/coreclr/inc/corinfo.h
@@ -572,7 +572,10 @@ enum CorInfoHelpFunc
CORINFO_HELP_INIT_PINVOKE_FRAME, // initialize an inlined PInvoke Frame for the JIT-compiler
CORINFO_HELP_MEMSET, // Init block of memory
+ CORINFO_HELP_MEMZERO, // Init block of memory with zeroes
CORINFO_HELP_MEMCPY, // Copy block of memory
+ CORINFO_HELP_NATIVE_MEMSET, // Init block of memory using native memset (not safe for pDst being null,
+ // not safe for unbounded size, does not trigger GC)
CORINFO_HELP_RUNTIMEHANDLE_METHOD, // determine a type/field/method handle at run-time
CORINFO_HELP_RUNTIMEHANDLE_METHOD_LOG, // determine a type/field/method handle at run-time, with IBC logging
diff --git a/src/coreclr/inc/daccess.h b/src/coreclr/inc/daccess.h
index 699947a02cdd..7783fde153f9 100644
--- a/src/coreclr/inc/daccess.h
+++ b/src/coreclr/inc/daccess.h
@@ -614,8 +614,7 @@ struct DacTableHeader
// Define TADDR as a non-pointer value so use of it as a pointer
// will not work properly. Define it as unsigned so
// pointer comparisons aren't affected by sign.
-// This requires special casting to ULONG64 to sign-extend if necessary.
-typedef ULONG_PTR TADDR;
+typedef uintptr_t TADDR;
// TSIZE_T used for counts or ranges that need to span the size of a
// target pointer. For cross-plat, this may be different than SIZE_T
@@ -2128,7 +2127,7 @@ inline void DACCOP_IGNORE(DacCopWarningCode code, const char * szReasonString)
// Declare TADDR as a non-pointer type so that arithmetic
// can be done on it directly, as with the DACCESS_COMPILE definition.
// This also helps expose pointer usage that may need to be changed.
-typedef ULONG_PTR TADDR;
+typedef uintptr_t TADDR;
typedef void* PTR_VOID;
typedef LPVOID* PTR_PTR_VOID;
diff --git a/src/coreclr/inc/gcinfo.h b/src/coreclr/inc/gcinfo.h
index 66933b10f044..2a6ba1914f0b 100644
--- a/src/coreclr/inc/gcinfo.h
+++ b/src/coreclr/inc/gcinfo.h
@@ -15,22 +15,21 @@
#include "daccess.h"
#include "windef.h" // For BYTE
-// Some declarations in this file are used on non-x86 platforms, but most are x86-specific.
-
// Use the lower 2 bits of the offsets stored in the tables
// to encode properties
const unsigned OFFSET_MASK = 0x3; // mask to access the low 2 bits
//
-// Note for untracked locals the flags allowed are "pinned" and "byref"
-// and for tracked locals the flags allowed are "this" and "byref"
// Note that these definitions should also match the definitions of
// GC_CALL_INTERIOR and GC_CALL_PINNED in VM/gc.h
//
const unsigned byref_OFFSET_FLAG = 0x1; // the offset is an interior ptr
const unsigned pinned_OFFSET_FLAG = 0x2; // the offset is a pinned ptr
-#if !defined(TARGET_X86) || !defined(FEATURE_EH_FUNCLETS)
+#if defined(TARGET_X86) && !defined(FEATURE_EH_FUNCLETS)
+// JIT32_ENCODER has additional restriction on x86 without funclets:
+// - for untracked locals the flags allowed are "pinned" and "byref"
+// - for tracked locals the flags allowed are "this" and "byref"
const unsigned this_OFFSET_FLAG = 0x2; // the offset is "this"
#endif
diff --git a/src/coreclr/inc/jiteeversionguid.h b/src/coreclr/inc/jiteeversionguid.h
index 646f1b169330..11675936acfa 100644
--- a/src/coreclr/inc/jiteeversionguid.h
+++ b/src/coreclr/inc/jiteeversionguid.h
@@ -43,11 +43,11 @@ typedef const GUID *LPCGUID;
#define GUID_DEFINED
#endif // !GUID_DEFINED
-constexpr GUID JITEEVersionIdentifier = { /* 1f30d12b-38f1-4f1e-a08a-831def882aa4 */
- 0x1f30d12b,
- 0x38f1,
- 0x4f1e,
- {0xa0, 0x8a, 0x83, 0x1d, 0xef, 0x88, 0x2a, 0xa4}
+constexpr GUID JITEEVersionIdentifier = { /* 86eab154-5d93-4fad-bc07-e94fd9268b70 */
+ 0x86eab154,
+ 0x5d93,
+ 0x4fad,
+ {0xbc, 0x07, 0xe9, 0x4f, 0xd9, 0x26, 0x8b, 0x70}
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////
diff --git a/src/coreclr/inc/jithelpers.h b/src/coreclr/inc/jithelpers.h
index 65167abd6a4d..a0982f3ac652 100644
--- a/src/coreclr/inc/jithelpers.h
+++ b/src/coreclr/inc/jithelpers.h
@@ -235,13 +235,10 @@
DYNAMICJITHELPER(CORINFO_HELP_INIT_PINVOKE_FRAME, NULL, CORINFO_HELP_SIG_REG_ONLY)
#endif
-#ifdef TARGET_X86
- JITHELPER(CORINFO_HELP_MEMSET, NULL, CORINFO_HELP_SIG_CANNOT_USE_ALIGN_STUB)
- JITHELPER(CORINFO_HELP_MEMCPY, NULL, CORINFO_HELP_SIG_CANNOT_USE_ALIGN_STUB)
-#else
- JITHELPER(CORINFO_HELP_MEMSET, JIT_MemSet, CORINFO_HELP_SIG_REG_ONLY)
- JITHELPER(CORINFO_HELP_MEMCPY, JIT_MemCpy, CORINFO_HELP_SIG_REG_ONLY)
-#endif
+ DYNAMICJITHELPER(CORINFO_HELP_MEMSET, NULL, CORINFO_HELP_SIG_REG_ONLY)
+ DYNAMICJITHELPER(CORINFO_HELP_MEMZERO, NULL, CORINFO_HELP_SIG_REG_ONLY)
+ DYNAMICJITHELPER(CORINFO_HELP_MEMCPY, NULL, CORINFO_HELP_SIG_REG_ONLY)
+ JITHELPER(CORINFO_HELP_NATIVE_MEMSET, Jit_NativeMemSet, CORINFO_HELP_SIG_REG_ONLY)
// Generics
JITHELPER(CORINFO_HELP_RUNTIMEHANDLE_METHOD, JIT_GenericHandleMethod, CORINFO_HELP_SIG_REG_ONLY)
diff --git a/src/coreclr/inc/readytorun.h b/src/coreclr/inc/readytorun.h
index b3128cb00e4b..41a4aa251fa7 100644
--- a/src/coreclr/inc/readytorun.h
+++ b/src/coreclr/inc/readytorun.h
@@ -20,7 +20,7 @@
// If you update this, ensure you run `git grep MINIMUM_READYTORUN_MAJOR_VERSION`
// and handle pending work.
#define READYTORUN_MAJOR_VERSION 0x0009
-#define READYTORUN_MINOR_VERSION 0x0001
+#define READYTORUN_MINOR_VERSION 0x0002
#define MINIMUM_READYTORUN_MAJOR_VERSION 0x009
@@ -33,6 +33,8 @@
// R2R Version 8.0 Changes the alignment of the Int128 type
// R2R Version 9.0 adds support for the Vector512 type
// R2R Version 9.1 adds new helpers to allocate objects on frozen segments
+// R2R Version 9.2 adds MemZero and NativeMemSet helpers
+
struct READYTORUN_CORE_HEADER
{
@@ -325,7 +327,9 @@ enum ReadyToRunHelper
READYTORUN_HELPER_Stelem_Ref = 0x38,
READYTORUN_HELPER_Ldelema_Ref = 0x39,
- READYTORUN_HELPER_MemSet = 0x40,
+ READYTORUN_HELPER_MemZero = 0x3E,
+ READYTORUN_HELPER_MemSet = 0x3F,
+ READYTORUN_HELPER_NativeMemSet = 0x40,
READYTORUN_HELPER_MemCpy = 0x41,
// PInvoke helpers
@@ -441,10 +445,6 @@ enum ReadyToRunHelper
READYTORUN_HELPER_StackProbe = 0x111,
READYTORUN_HELPER_GetCurrentManagedThreadId = 0x112,
-
- // Array helpers for use with native ints
- READYTORUN_HELPER_Stelem_Ref_I = 0x113,
- READYTORUN_HELPER_Ldelema_Ref_I = 0x114,
};
#include "readytoruninstructionset.h"
diff --git a/src/coreclr/inc/readytorunhelpers.h b/src/coreclr/inc/readytorunhelpers.h
index 8691f9b9cb8c..bbb586e8eb4a 100644
--- a/src/coreclr/inc/readytorunhelpers.h
+++ b/src/coreclr/inc/readytorunhelpers.h
@@ -29,6 +29,8 @@ HELPER(READYTORUN_HELPER_Stelem_Ref, CORINFO_HELP_ARRADDR_ST,
HELPER(READYTORUN_HELPER_Ldelema_Ref, CORINFO_HELP_LDELEMA_REF, )
HELPER(READYTORUN_HELPER_MemSet, CORINFO_HELP_MEMSET, )
+HELPER(READYTORUN_HELPER_MemZero, CORINFO_HELP_MEMZERO, )
+HELPER(READYTORUN_HELPER_NativeMemSet, CORINFO_HELP_NATIVE_MEMSET, )
HELPER(READYTORUN_HELPER_MemCpy, CORINFO_HELP_MEMCPY, )
HELPER(READYTORUN_HELPER_LogMethodEnter, CORINFO_HELP_BBT_FCN_ENTER, )
diff --git a/src/coreclr/inc/regdisp.h b/src/coreclr/inc/regdisp.h
index 4832791ebfa5..91a4305f9c66 100644
--- a/src/coreclr/inc/regdisp.h
+++ b/src/coreclr/inc/regdisp.h
@@ -318,7 +318,7 @@ struct REGDISPLAY : public REGDISPLAY_BASE {
memset(this, 0, sizeof(REGDISPLAY));
// Setup the pointer to ControlPC field
- pPC = &ControlPC;
+ pPC = (DWORD *)&ControlPC;
}
};
diff --git a/src/coreclr/jit/CMakeLists.txt b/src/coreclr/jit/CMakeLists.txt
index ae08a27e4c00..5ba50306d1b7 100644
--- a/src/coreclr/jit/CMakeLists.txt
+++ b/src/coreclr/jit/CMakeLists.txt
@@ -23,6 +23,8 @@ function(create_standalone_jit)
if(TARGETDETAILS_OS STREQUAL "unix_osx" OR TARGETDETAILS_OS STREQUAL "unix_anyos")
set(JIT_ARCH_LINK_LIBRARIES gcinfo_unix_${TARGETDETAILS_ARCH})
+ elseif(TARGETDETAILS_OS STREQUAL "win_aot")
+ set(JIT_ARCH_LINK_LIBRARIES gcinfo_win_${TARGETDETAILS_ARCH})
else()
set(JIT_ARCH_LINK_LIBRARIES gcinfo_${TARGETDETAILS_OS}_${TARGETDETAILS_ARCH})
endif()
@@ -94,7 +96,6 @@ set( JIT_SOURCES
bitset.cpp
block.cpp
buildstring.cpp
- layout.cpp
codegencommon.cpp
codegenlinear.cpp
compiler.cpp
@@ -123,14 +124,15 @@ set( JIT_SOURCES
gentree.cpp
gschecks.cpp
hashbv.cpp
- hwintrinsic.cpp
+ helperexpansion.cpp
hostallocator.cpp
+ hwintrinsic.cpp
ifconversion.cpp
- helperexpansion.cpp
- indirectcalltransformer.cpp
- importercalls.cpp
importer.cpp
+ importercalls.cpp
importervectorization.cpp
+ indirectcalltransformer.cpp
+ inductionvariableopts.cpp
inline.cpp
inlinepolicy.cpp
instr.cpp
@@ -138,6 +140,7 @@ set( JIT_SOURCES
jiteh.cpp
jithashtable.cpp
jitmetadata.cpp
+ layout.cpp
lclmorph.cpp
lclvars.cpp
likelyclass.cpp
@@ -152,7 +155,6 @@ set( JIT_SOURCES
objectalloc.cpp
optcse.cpp
optimizebools.cpp
- switchrecognition.cpp
optimizer.cpp
patchpoint.cpp
phase.cpp
@@ -165,6 +167,7 @@ set( JIT_SOURCES
regalloc.cpp
registerargconvention.cpp
regset.cpp
+ scev.cpp
scopeinfo.cpp
sideeffects.cpp
sm.cpp
@@ -173,6 +176,7 @@ set( JIT_SOURCES
ssabuilder.cpp
ssarenamestate.cpp
stacklevelsetter.cpp
+ switchrecognition.cpp
treelifeupdater.cpp
unwind.cpp
utils.cpp
@@ -359,6 +363,7 @@ set( JIT_HEADERS
registerargconvention.h
register.h
regset.h
+ scev.h
sideeffects.h
simd.h
simdashwintrinsic.h
@@ -649,6 +654,7 @@ else()
create_standalone_jit(TARGET clrjit_universal_arm_${ARCH_HOST_NAME} OS universal ARCH arm DESTINATIONS .)
target_compile_definitions(clrjit_universal_arm_${ARCH_HOST_NAME} PRIVATE ARM_SOFTFP CONFIGURABLE_ARM_ABI)
create_standalone_jit(TARGET clrjit_win_x86_${ARCH_HOST_NAME} OS win ARCH x86 DESTINATIONS .)
+ create_standalone_jit(TARGET clrjit_win_aot_x86_${ARCH_HOST_NAME} OS win_aot ARCH x86 DESTINATIONS .)
endif (CLR_CMAKE_TARGET_ARCH_RISCV64)
if (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX)
diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp
index ba25a60af8ed..5d69c8d1bdd9 100644
--- a/src/coreclr/jit/assertionprop.cpp
+++ b/src/coreclr/jit/assertionprop.cpp
@@ -5493,7 +5493,6 @@ GenTree* Compiler::optAssertionProp(ASSERT_VALARG_TP assertions, GenTree* tree,
case GT_IND:
case GT_STOREIND:
case GT_NULLCHECK:
- case GT_STORE_DYN_BLK:
return optAssertionProp_Ind(assertions, tree, stmt);
case GT_BOUNDS_CHECK:
@@ -6147,7 +6146,7 @@ ASSERT_TP* Compiler::optComputeAssertionGen()
AssertionIndex valueAssertionIndex;
AssertionIndex jumpDestAssertionIndex;
- if (info.IsNextEdgeAssertion())
+ if (info.AssertionHoldsOnFalseEdge())
{
valueAssertionIndex = info.GetAssertionIndex();
jumpDestAssertionIndex = optFindComplementary(info.GetAssertionIndex());
diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp
index 273c6f045123..bd9abbe7fef3 100644
--- a/src/coreclr/jit/block.cpp
+++ b/src/coreclr/jit/block.cpp
@@ -69,6 +69,24 @@ unsigned SsaStressHashHelper()
#endif
//------------------------------------------------------------------------
+// setLikelihood: set the likelihood of a flow edge
+//
+// Arguments:
+// likelihood -- value in range [0.0, 1.0] indicating how likely
+// the source block is to transfer control along this edge.
+//
+void FlowEdge::setLikelihood(weight_t likelihood)
+{
+ assert(likelihood >= 0.0);
+ assert(likelihood <= 1.0);
+ m_likelihoodSet = true;
+ m_likelihood = likelihood;
+
+ JITDUMP("setting likelihood of " FMT_BB " -> " FMT_BB " to " FMT_WT "\n", m_sourceBlock->bbNum, m_destBlock->bbNum,
+ m_likelihood);
+}
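
A hypothetical usage sketch for the new setter (edge names invented): a branch estimated as 75% taken records complementary likelihoods on its two edges.

```cpp
// Likelihoods are per-edge probabilities in [0.0, 1.0]; out-of-range values assert.
trueEdge->setLikelihood (0.75);  // branch taken about 3 times out of 4
falseEdge->setLikelihood (0.25); // complementary fall-through probability
```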
+
+//------------------------------------------------------------------------
// AllSuccessorEnumerator: Construct an instance of the enumerator.
//
// Arguments:
@@ -676,11 +694,11 @@ void BasicBlock::dspKind() const
break;
case BBJ_EHFILTERRET:
- printf(" -> %s (fltret)", dspBlockNum(bbTarget));
+ printf(" -> %s (fltret)", dspBlockNum(GetTarget()));
break;
case BBJ_EHCATCHRET:
- printf(" -> %s (cret)", dspBlockNum(bbTarget));
+ printf(" -> %s (cret)", dspBlockNum(GetTarget()));
break;
case BBJ_THROW:
@@ -694,28 +712,28 @@ void BasicBlock::dspKind() const
case BBJ_ALWAYS:
if (HasFlag(BBF_KEEP_BBJ_ALWAYS))
{
- printf(" -> %s (ALWAYS)", dspBlockNum(bbTarget));
+ printf(" -> %s (ALWAYS)", dspBlockNum(GetTarget()));
}
else
{
- printf(" -> %s (always)", dspBlockNum(bbTarget));
+ printf(" -> %s (always)", dspBlockNum(GetTarget()));
}
break;
case BBJ_LEAVE:
- printf(" -> %s (leave)", dspBlockNum(bbTarget));
+ printf(" -> %s (leave)", dspBlockNum(GetTarget()));
break;
case BBJ_CALLFINALLY:
- printf(" -> %s (callf)", dspBlockNum(bbTarget));
+ printf(" -> %s (callf)", dspBlockNum(GetTarget()));
break;
case BBJ_CALLFINALLYRET:
- printf(" -> %s (callfr)", dspBlockNum(bbTarget));
+ printf(" -> %s (callfr)", dspBlockNum(GetTarget()));
break;
case BBJ_COND:
- printf(" -> %s,%s (cond)", dspBlockNum(bbTrueTarget), dspBlockNum(bbFalseTarget));
+ printf(" -> %s,%s (cond)", dspBlockNum(GetTrueTarget()), dspBlockNum(GetFalseTarget()));
break;
case BBJ_SWITCH:
@@ -857,11 +875,16 @@ void BasicBlock::TransferTarget(BasicBlock* from)
SetEhf(from->GetEhfTargets());
from->bbEhfTargets = nullptr; // Make sure nobody uses the descriptor after this.
break;
+
+ // TransferTarget may be called after setting the source block of `from`'s
+ // successor edges to this block.
+ // This means calling GetTarget/GetTrueTarget/GetFalseTarget would trigger asserts.
+ // Avoid this by accessing the edges directly.
case BBJ_COND:
- SetCond(from->GetTrueTarget(), from->GetFalseTarget());
+ SetCond(from->bbTrueEdge, from->bbFalseEdge);
break;
case BBJ_ALWAYS:
- SetKindAndTarget(from->GetKind(), from->GetTarget());
+ SetKindAndTargetEdge(BBJ_ALWAYS, from->bbTargetEdge);
CopyFlags(from, BBF_NONE_QUIRK);
break;
case BBJ_CALLFINALLY:
@@ -869,10 +892,10 @@ void BasicBlock::TransferTarget(BasicBlock* from)
case BBJ_EHCATCHRET:
case BBJ_EHFILTERRET:
case BBJ_LEAVE:
- SetKindAndTarget(from->GetKind(), from->GetTarget());
+ SetKindAndTargetEdge(from->GetKind(), from->bbTargetEdge);
break;
default:
- SetKindAndTarget(from->GetKind()); // Clear the target
+ SetKindAndTargetEdge(from->GetKind()); // Clear the target
break;
}
assert(KindIs(from->GetKind()));
@@ -985,7 +1008,7 @@ BasicBlock* BasicBlock::GetUniquePred(Compiler* compiler) const
//
BasicBlock* BasicBlock::GetUniqueSucc() const
{
- return KindIs(BBJ_ALWAYS) ? bbTarget : nullptr;
+ return KindIs(BBJ_ALWAYS) ? GetTarget() : nullptr;
}
// Static vars.
@@ -1145,7 +1168,7 @@ unsigned BasicBlock::NumSucc() const
return 1;
case BBJ_COND:
- if (bbTrueTarget == bbFalseTarget)
+ if (bbTrueEdge == bbFalseEdge)
{
return 1;
}
@@ -1180,15 +1203,15 @@ unsigned BasicBlock::NumSucc() const
}
//------------------------------------------------------------------------
-// GetSucc: Returns the requested block successor. See the declaration comment for details.
+// GetSucc: Returns the requested successor edge. See the declaration comment for details.
//
// Arguments:
// i - index of successor to return. 0 <= i <= NumSucc().
//
// Return Value:
-// Requested successor block
+// Requested successor edge
//
-BasicBlock* BasicBlock::GetSucc(unsigned i) const
+FlowEdge* BasicBlock::GetSuccEdge(unsigned i) const
{
assert(i < NumSucc()); // Index bounds check.
switch (bbKind)
@@ -1199,25 +1222,25 @@ BasicBlock* BasicBlock::GetSucc(unsigned i) const
case BBJ_EHCATCHRET:
case BBJ_EHFILTERRET:
case BBJ_LEAVE:
- return bbTarget;
+ return GetTargetEdge();
case BBJ_COND:
if (i == 0)
{
- return bbFalseTarget;
+ return GetFalseEdge();
}
else
{
assert(i == 1);
- assert(bbFalseTarget != bbTrueTarget);
- return bbTrueTarget;
+ assert(bbTrueEdge != bbFalseEdge);
+ return GetTrueEdge();
}
case BBJ_EHFINALLYRET:
- return bbEhfTargets->bbeSuccs[i]->getDestinationBlock();
+ return bbEhfTargets->bbeSuccs[i];
case BBJ_SWITCH:
- return bbSwtTargets->bbsDstTab[i]->getDestinationBlock();
+ return bbSwtTargets->bbsDstTab[i];
default:
unreached();
@@ -1225,6 +1248,20 @@ BasicBlock* BasicBlock::GetSucc(unsigned i) const
}
//------------------------------------------------------------------------
+// GetSucc: Returns the requested block successor. See the declaration comment for details.
+//
+// Arguments:
+// i - index of successor to return. 0 <= i <= NumSucc().
+//
+// Return Value:
+// Requested successor block
+//
+BasicBlock* BasicBlock::GetSucc(unsigned i) const
+{
+ return GetSuccEdge(i)->getDestinationBlock();
+}
+
+//------------------------------------------------------------------------
// NumSucc: Returns the count of block successors. See the declaration comment for details.
//
// Arguments:
@@ -1270,7 +1307,7 @@ unsigned BasicBlock::NumSucc(Compiler* comp)
return 1;
case BBJ_COND:
- if (bbTrueTarget == bbFalseTarget)
+ if (bbTrueEdge == bbFalseEdge)
{
return 1;
}
@@ -1291,16 +1328,16 @@ unsigned BasicBlock::NumSucc(Compiler* comp)
}
//------------------------------------------------------------------------
-// GetSucc: Returns the requested block successor. See the declaration comment for details.
+// GetSucc: Returns the requested successor edge. See the declaration comment for details.
//
// Arguments:
// i - index of successor to return. 0 <= i <= NumSucc(comp).
// comp - Compiler instance
//
// Return Value:
-// Requested successor block
+// Requested successor edge
//
-BasicBlock* BasicBlock::GetSucc(unsigned i, Compiler* comp)
+FlowEdge* BasicBlock::GetSuccEdge(unsigned i, Compiler* comp)
{
assert(comp != nullptr);
@@ -1309,31 +1346,31 @@ BasicBlock* BasicBlock::GetSucc(unsigned i, Compiler* comp)
{
case BBJ_EHFILTERRET:
// Handler is the (sole) normal successor of the filter.
- assert(comp->fgFirstBlockOfHandler(this) == bbTarget);
- return bbTarget;
+ assert(comp->fgFirstBlockOfHandler(this) == GetTarget());
+ return GetTargetEdge();
case BBJ_EHFINALLYRET:
assert(bbEhfTargets != nullptr);
assert(i < bbEhfTargets->bbeCount);
- return bbEhfTargets->bbeSuccs[i]->getDestinationBlock();
+ return bbEhfTargets->bbeSuccs[i];
case BBJ_CALLFINALLY:
case BBJ_CALLFINALLYRET:
case BBJ_ALWAYS:
case BBJ_EHCATCHRET:
case BBJ_LEAVE:
- return bbTarget;
+ return GetTargetEdge();
case BBJ_COND:
if (i == 0)
{
- return bbFalseTarget;
+ return GetFalseEdge();
}
else
{
assert(i == 1);
- assert(bbFalseTarget != bbTrueTarget);
- return bbTrueTarget;
+ assert(bbTrueEdge != bbFalseEdge);
+ return GetTrueEdge();
}
case BBJ_SWITCH:
@@ -1348,6 +1385,21 @@ BasicBlock* BasicBlock::GetSucc(unsigned i, Compiler* comp)
}
}
+//------------------------------------------------------------------------
+// GetSucc: Returns the requested block successor. See the declaration comment for details.
+//
+// Arguments:
+//    i - index of successor to return. 0 <= i < NumSucc(comp).
+// comp - Compiler instance
+//
+// Return Value:
+// Requested successor block
+//
+BasicBlock* BasicBlock::GetSucc(unsigned i, Compiler* comp)
+{
+ return GetSuccEdge(i, comp)->getDestinationBlock();
+}
+
void BasicBlock::InitVarSets(Compiler* comp)
{
VarSetOps::AssignNoCopy(comp, bbVarUse, VarSetOps::MakeEmpty(comp));
@@ -1585,15 +1637,10 @@ BasicBlock* BasicBlock::New(Compiler* compiler)
return block;
}
-BasicBlock* BasicBlock::New(Compiler* compiler, BBKinds kind, BasicBlock* target /* = nullptr */)
+BasicBlock* BasicBlock::New(Compiler* compiler, BBKinds kind)
{
BasicBlock* block = BasicBlock::New(compiler);
-
- // In some cases, we don't know a block's jump target during initialization, so don't check the jump kind/target
- // yet.
- // The checks will be done any time the jump kind/target is read or written to after initialization.
- block->bbKind = kind;
- block->bbTarget = target;
+ block->bbKind = kind;
if (block->KindIs(BBJ_THROW))
{
diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h
index 208eb07e583f..e4b6b9d0daac 100644
--- a/src/coreclr/jit/block.h
+++ b/src/coreclr/jit/block.h
@@ -307,29 +307,17 @@ public:
}
};
-// BBArrayIterator: forward iterator for an array of BasicBlock*, such as the BBswtDesc->bbsDstTab.
+// BBArrayIterator: forward iterator for an array of FlowEdge*, yielding each edge's destination BasicBlock*.
// It is an error (with assert) to yield a nullptr BasicBlock* in this array.
-// `m_bbEntry` can be nullptr, but it only makes sense if both the begin and end of an iteration range are nullptr
+// `m_edgeEntry` can be nullptr, but it only makes sense if both the begin and end of an iteration range are nullptr
// (meaning, no actual iteration will happen).
//
class BBArrayIterator
{
- // Quirk: Some BasicBlock kinds refer to their successors with BasicBlock pointers,
- // while others use FlowEdge pointers. Eventually, every type will use FlowEdge pointers.
- // For now, support iterating with both types.
- union {
- BasicBlock* const* m_bbEntry;
- FlowEdge* const* m_edgeEntry;
- };
-
- bool iterateEdges;
+ FlowEdge* const* m_edgeEntry;
public:
- BBArrayIterator(BasicBlock* const* bbEntry) : m_bbEntry(bbEntry), iterateEdges(false)
- {
- }
-
- BBArrayIterator(FlowEdge* const* edgeEntry) : m_edgeEntry(edgeEntry), iterateEdges(true)
+ BBArrayIterator(FlowEdge* const* edgeEntry) : m_edgeEntry(edgeEntry)
{
}
@@ -337,14 +325,49 @@ public:
BBArrayIterator& operator++()
{
- assert(m_bbEntry != nullptr);
- ++m_bbEntry;
+ assert(m_edgeEntry != nullptr);
+ ++m_edgeEntry;
return *this;
}
bool operator!=(const BBArrayIterator& i) const
{
- return m_bbEntry != i.m_bbEntry;
+ return m_edgeEntry != i.m_edgeEntry;
+ }
+};
+
+// FlowEdgeArrayIterator: forward iterator for an array of FlowEdge*, such as the BBswtDesc->bbsDstTab.
+// It is an error (with assert) to yield a nullptr FlowEdge* in this array.
+// `m_edgeEntry` can be nullptr, but it only makes sense if both the begin and end of an iteration range are nullptr
+// (meaning, no actual iteration will happen).
+//
+class FlowEdgeArrayIterator
+{
+ FlowEdge* const* m_edgeEntry;
+
+public:
+ FlowEdgeArrayIterator(FlowEdge* const* edgeEntry) : m_edgeEntry(edgeEntry)
+ {
+ }
+
+ FlowEdge* operator*() const
+ {
+ assert(m_edgeEntry != nullptr);
+ FlowEdge* const edge = *m_edgeEntry;
+ assert(edge != nullptr);
+ return edge;
+ }
+
+ FlowEdgeArrayIterator& operator++()
+ {
+ assert(m_edgeEntry != nullptr);
+ ++m_edgeEntry;
+ return *this;
+ }
+
+ bool operator!=(const FlowEdgeArrayIterator& i) const
+ {
+ return m_edgeEntry != i.m_edgeEntry;
}
};
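
// Example traversal of a switch descriptor's edge table with the new iterator
// (a sketch; `swtDesc` is an assumed BBswtDesc*):
//   FlowEdgeArrayIterator iter(swtDesc->bbsDstTab);
//   FlowEdgeArrayIterator end(swtDesc->bbsDstTab + swtDesc->bbsCount);
//   for (; iter != end; ++iter)
//   {
//       FlowEdge* const edge = *iter;
//       ...
//   }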
@@ -506,6 +529,179 @@ enum class BasicBlockVisit
// clang-format on
+//-------------------------------------------------------------------------
+// FlowEdge -- control flow edge
+//
+// In compiler terminology the control flow between two BasicBlocks
+// is typically referred to as an "edge". Most well known are the
+// backward branches for loops, which are often called "back-edges".
+//
+// "struct FlowEdge" is the type that represents our control flow edges.
+// This type is a linked list of zero or more "edges".
+// (The list of zero edges is represented by NULL.)
+// Every BasicBlock has a field called bbPreds of this type. This field
+// represents the list of "edges" that flow into this BasicBlock.
+// The FlowEdge type only stores the BasicBlock* of the source for the
+// control flow edge. The destination block for the control flow edge
+// is implied to be the block that contains the bbPreds field.
+//
+// For a switch branch target there may be multiple "edges" that have
+// the same source block (and destination block). We need to count the
+// number of these edges so that during optimization we will know when
+// we have zero of them. Rather than have extra FlowEdge entries we
+// track this via the DupCount property.
+//
+// When we have profile weights for the BasicBlocks we can usually compute
+// the number of times each edge was executed by examining the adjacent
+// BasicBlock weights. As we do for BasicBlocks, we call the number of
+// times that a control flow edge was executed the "edge weight".
+// In order to compute the edge weights we need to use a bounded range
+// for every edge weight. The two fields 'm_edgeWeightMin' and 'm_edgeWeightMax'
+// are used to hold this bounded range. Most often these will converge such
+// that both values are the same, and that value is the exact edge weight.
+// Sometimes we are left with a range of possible values between [Min..Max]
+// which represents an inexact edge weight.
+//
+// The bbPreds list is initially created by Compiler::fgLinkBasicBlocks()
+// and is incrementally kept up to date.
+//
+// The edge weights are computed by Compiler::fgComputeEdgeWeights();
+// the edge weights are used to straighten conditional branches
+// by Compiler::fgReorderBlocks().
+//
+struct FlowEdge
+{
+private:
+ // The next predecessor edge in the list, nullptr for end of list.
+ FlowEdge* m_nextPredEdge;
+
+ // The source of the control flow
+ BasicBlock* m_sourceBlock;
+
+ // The destination of the control flow
+ BasicBlock* m_destBlock;
+
+ // Edge weights
+ weight_t m_edgeWeightMin;
+ weight_t m_edgeWeightMax;
+
+ // Likelihood that m_sourceBlock transfers control along this edge.
+ // Values in range [0..1]
+ weight_t m_likelihood;
+
+ // The count of duplicate "edges" (used for switch stmts or degenerate branches)
+ unsigned m_dupCount;
+
+ // True if likelihood has been set
+ bool m_likelihoodSet;
+
+public:
+ FlowEdge(BasicBlock* sourceBlock, BasicBlock* destBlock, FlowEdge* rest)
+ : m_nextPredEdge(rest)
+ , m_sourceBlock(sourceBlock)
+ , m_destBlock(destBlock)
+ , m_edgeWeightMin(0)
+ , m_edgeWeightMax(0)
+ , m_likelihood(0)
+ , m_dupCount(0)
+ , m_likelihoodSet(false)
+ {
+ }
+
+ FlowEdge* getNextPredEdge() const
+ {
+ return m_nextPredEdge;
+ }
+
+ FlowEdge** getNextPredEdgeRef()
+ {
+ return &m_nextPredEdge;
+ }
+
+ void setNextPredEdge(FlowEdge* newEdge)
+ {
+ m_nextPredEdge = newEdge;
+ }
+
+ BasicBlock* getSourceBlock() const
+ {
+ assert(m_sourceBlock != nullptr);
+ return m_sourceBlock;
+ }
+
+ void setSourceBlock(BasicBlock* newBlock)
+ {
+ assert(newBlock != nullptr);
+ m_sourceBlock = newBlock;
+ }
+
+ BasicBlock* getDestinationBlock() const
+ {
+ assert(m_destBlock != nullptr);
+ return m_destBlock;
+ }
+
+ void setDestinationBlock(BasicBlock* newBlock)
+ {
+ assert(newBlock != nullptr);
+ m_destBlock = newBlock;
+ }
+
+ weight_t edgeWeightMin() const
+ {
+ return m_edgeWeightMin;
+ }
+
+ weight_t edgeWeightMax() const
+ {
+ return m_edgeWeightMax;
+ }
+
+    // These two methods are used to set new values for edge weights.
+    // They return false if the newWeight is not within the current [min..max] range;
+    // when slop is non-zero we allow for the case where our weights might be off by 'slop'.
+ //
+ bool setEdgeWeightMinChecked(weight_t newWeight, BasicBlock* bDst, weight_t slop, bool* wbUsedSlop);
+ bool setEdgeWeightMaxChecked(weight_t newWeight, BasicBlock* bDst, weight_t slop, bool* wbUsedSlop);
+ void setEdgeWeights(weight_t newMinWeight, weight_t newMaxWeight, BasicBlock* bDst);
+
+ weight_t getLikelihood() const
+ {
+ return m_likelihood;
+ }
+
+ void setLikelihood(weight_t likelihood);
+
+ void clearLikelihood()
+ {
+ m_likelihood = 0.0;
+ m_likelihoodSet = false;
+ }
+
+ bool hasLikelihood() const
+ {
+ return m_likelihoodSet;
+ }
+
+ weight_t getLikelyWeight() const;
+
+ unsigned getDupCount() const
+ {
+ return m_dupCount;
+ }
+
+ void incrementDupCount()
+ {
+ m_dupCount++;
+ }
+
+ void decrementDupCount()
+ {
+ assert(m_dupCount >= 1);
+ m_dupCount--;
+ }
+};
+
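// A minimal usage sketch for the FlowEdge accessors above (illustrative;
// allocation via `new (comp, CMK_FlowEdge)` and the caller's variables are
// assumed from the surrounding compiler code):
//
//   FlowEdge* edge = new (comp, CMK_FlowEdge) FlowEdge(pred, succ, succ->bbPreds);
//   succ->bbPreds  = edge;                // prepend onto succ's pred list
//   edge->incrementDupCount();            // count each switch case that reuses this edge
//   edge->setLikelihood(0.5);             // half of pred's flow takes this edge
//   weight_t w = edge->getLikelyWeight(); // == 0.5 * pred->bbWeight
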
//------------------------------------------------------------------------
// BasicBlock: describes a basic block in the flowgraph.
//
@@ -525,19 +721,19 @@ private:
/* The following union describes the jump target(s) of this block */
union {
- unsigned bbTargetOffs; // PC offset (temporary only)
- BasicBlock* bbTarget; // basic block
- BasicBlock* bbTrueTarget; // BBJ_COND jump target when its condition is true (alias for bbTarget)
- BBswtDesc* bbSwtTargets; // switch descriptor
- BBehfDesc* bbEhfTargets; // BBJ_EHFINALLYRET descriptor
+ unsigned bbTargetOffs; // PC offset (temporary only)
+ FlowEdge* bbTargetEdge; // successor edge for block kinds with only one successor (BBJ_ALWAYS, etc)
+ FlowEdge* bbTrueEdge; // BBJ_COND successor edge when its condition is true (alias for bbTargetEdge)
+ BBswtDesc* bbSwtTargets; // switch descriptor
+ BBehfDesc* bbEhfTargets; // BBJ_EHFINALLYRET descriptor
};
- // Points to the successor of a BBJ_COND block if bbTrueTarget is not taken
- BasicBlock* bbFalseTarget;
+    // Successor edge of a BBJ_COND block, taken when bbTrueEdge is not (i.e. when the condition is false)
+ FlowEdge* bbFalseEdge;
public:
static BasicBlock* New(Compiler* compiler);
- static BasicBlock* New(Compiler* compiler, BBKinds kind, BasicBlock* target = nullptr);
+ static BasicBlock* New(Compiler* compiler, BBKinds kind);
static BasicBlock* New(Compiler* compiler, BBehfDesc* ehfTargets);
static BasicBlock* New(Compiler* compiler, BBswtDesc* swtTargets);
static BasicBlock* New(Compiler* compiler, BBKinds kind, unsigned targetOffs);
@@ -623,100 +819,135 @@ public:
return bbTargetOffs;
}
- void SetKindAndTarget(BBKinds kind, unsigned targetOffs)
- {
- bbKind = kind;
- bbTargetOffs = targetOffs;
- assert(KindIs(BBJ_ALWAYS, BBJ_COND, BBJ_LEAVE));
- }
-
bool HasTarget() const
{
- // These block types should always have bbTarget set
+ // These block types should always have bbTargetEdge set
return KindIs(BBJ_ALWAYS, BBJ_CALLFINALLY, BBJ_CALLFINALLYRET, BBJ_EHCATCHRET, BBJ_EHFILTERRET, BBJ_LEAVE);
}
BasicBlock* GetTarget() const
{
- // Only block kinds that use `bbTarget` can access it, and it must be non-null.
+ return GetTargetEdge()->getDestinationBlock();
+ }
+
+ FlowEdge* GetTargetEdge() const
+ {
+ // Only block kinds that use `bbTargetEdge` can access it, and it must be non-null.
assert(HasInitializedTarget());
- return bbTarget;
+ assert(bbTargetEdge->getSourceBlock() == this);
+ assert(bbTargetEdge->getDestinationBlock() != nullptr);
+ return bbTargetEdge;
}
- void SetTarget(BasicBlock* target)
+ void SetTargetEdge(FlowEdge* targetEdge)
{
-        // SetKindAndTarget() nulls target for non-jump kinds,
+        // SetKindAndTargetEdge() nulls the target edge for non-jump kinds,
- // so don't use SetTarget() to null bbTarget without updating bbKind.
- bbTarget = target;
+ // so don't use SetTargetEdge() to null bbTargetEdge without updating bbKind.
+ bbTargetEdge = targetEdge;
assert(HasInitializedTarget());
+ assert(bbTargetEdge->getSourceBlock() == this);
+ assert(bbTargetEdge->getDestinationBlock() != nullptr);
}
BasicBlock* GetTrueTarget() const
{
+ return GetTrueEdge()->getDestinationBlock();
+ }
+
+ FlowEdge* GetTrueEdge() const
+ {
assert(KindIs(BBJ_COND));
- assert(bbTrueTarget != nullptr);
- return bbTrueTarget;
+ assert(bbTrueEdge != nullptr);
+ assert(bbTrueEdge->getSourceBlock() == this);
+ assert(bbTrueEdge->getDestinationBlock() != nullptr);
+ return bbTrueEdge;
}
- void SetTrueTarget(BasicBlock* target)
+ void SetTrueEdge(FlowEdge* trueEdge)
{
assert(KindIs(BBJ_COND));
- assert(target != nullptr);
- bbTrueTarget = target;
+ bbTrueEdge = trueEdge;
+ assert(bbTrueEdge != nullptr);
+ assert(bbTrueEdge->getSourceBlock() == this);
+ assert(bbTrueEdge->getDestinationBlock() != nullptr);
}
bool TrueTargetIs(const BasicBlock* target) const
{
- assert(KindIs(BBJ_COND));
- assert(bbTrueTarget != nullptr);
- return (bbTrueTarget == target);
+ return (GetTrueTarget() == target);
+ }
+
+ bool TrueEdgeIs(const FlowEdge* targetEdge) const
+ {
+ return (GetTrueEdge() == targetEdge);
}
BasicBlock* GetFalseTarget() const
{
+ return GetFalseEdge()->getDestinationBlock();
+ }
+
+ FlowEdge* GetFalseEdge() const
+ {
assert(KindIs(BBJ_COND));
- assert(bbFalseTarget != nullptr);
- return bbFalseTarget;
+ assert(bbFalseEdge != nullptr);
+ assert(bbFalseEdge->getSourceBlock() == this);
+ assert(bbFalseEdge->getDestinationBlock() != nullptr);
+ return bbFalseEdge;
}
- void SetFalseTarget(BasicBlock* target)
+ void SetFalseEdge(FlowEdge* falseEdge)
{
assert(KindIs(BBJ_COND));
- assert(target != nullptr);
- bbFalseTarget = target;
+ bbFalseEdge = falseEdge;
+ assert(bbFalseEdge != nullptr);
+ assert(bbFalseEdge->getSourceBlock() == this);
+ assert(bbFalseEdge->getDestinationBlock() != nullptr);
}
bool FalseTargetIs(const BasicBlock* target) const
{
- assert(KindIs(BBJ_COND));
- assert(bbFalseTarget != nullptr);
- return (bbFalseTarget == target);
+ return (GetFalseTarget() == target);
}
- void SetCond(BasicBlock* trueTarget, BasicBlock* falseTarget)
+ bool FalseEdgeIs(const FlowEdge* targetEdge) const
{
- assert(trueTarget != nullptr);
- bbKind = BBJ_COND;
- bbTrueTarget = trueTarget;
- bbFalseTarget = falseTarget;
+ return (GetFalseEdge() == targetEdge);
}
- // Set both the block kind and target. This can clear `bbTarget` when setting
- // block kinds that don't use `bbTarget`.
- void SetKindAndTarget(BBKinds kind, BasicBlock* target = nullptr)
+ void SetCond(FlowEdge* trueEdge, FlowEdge* falseEdge)
{
- bbKind = kind;
- bbTarget = target;
+ bbKind = BBJ_COND;
+ SetTrueEdge(trueEdge);
+ SetFalseEdge(falseEdge);
+ }
+
+ // In most cases, a block's true and false targets are known by the time SetCond is called.
+ // To simplify the few cases where the false target isn't available until later,
+ // overload SetCond to initialize only the true target.
+ // This simplifies, for example, lowering switch blocks into jump sequences.
+ void SetCond(FlowEdge* trueEdge)
+ {
+ bbKind = BBJ_COND;
+ SetTrueEdge(trueEdge);
+ }
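    // For example (a sketch; fgAddRefPred's signature is assumed from the
    // Compiler class):
    //   block->SetCond(compiler->fgAddRefPred(trueTarget, block));
    //   ... create or locate the false target ...
    //   block->SetFalseEdge(compiler->fgAddRefPred(falseTarget, block));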
- // If bbKind indicates this block has a jump, bbTarget cannot be null.
+ // Set both the block kind and target edge. This can clear `bbTargetEdge` when setting
+ // block kinds that don't use `bbTargetEdge`.
+ void SetKindAndTargetEdge(BBKinds kind, FlowEdge* targetEdge = nullptr)
+ {
+ bbKind = kind;
+ bbTargetEdge = targetEdge;
+
+ // If bbKind indicates this block has a jump, bbTargetEdge cannot be null.
// You shouldn't use this to set a BBJ_COND, BBJ_SWITCH, or BBJ_EHFINALLYRET.
- assert(HasTarget() ? HasInitializedTarget() : (bbTarget == nullptr));
+ assert(HasTarget() ? HasInitializedTarget() : (bbTargetEdge == nullptr));
}
bool HasInitializedTarget() const
{
assert(HasTarget());
- return (bbTarget != nullptr);
+ return (bbTargetEdge != nullptr);
}
bool TargetIs(const BasicBlock* target) const
@@ -762,19 +993,13 @@ public:
bbEhfTargets = ehfTarget;
}
- // BBJ_CALLFINALLYRET uses the `bbTarget` field. However, also treat it specially:
+ // BBJ_CALLFINALLYRET uses the `bbTargetEdge` field. However, also treat it specially:
// for callers that know they want a continuation, use this function instead of the
// general `GetTarget()` to allow asserting on the block kind.
BasicBlock* GetFinallyContinuation() const
{
assert(KindIs(BBJ_CALLFINALLYRET));
- return bbTarget;
- }
-
- void SetFinallyContinuation(BasicBlock* finallyContinuation)
- {
- assert(KindIs(BBJ_CALLFINALLYRET));
- bbTarget = finallyContinuation;
+ return GetTarget();
}
#ifdef DEBUG
@@ -783,21 +1008,21 @@ public:
BasicBlock* GetTargetRaw() const
{
assert(HasTarget());
- return bbTarget;
+ return (bbTargetEdge == nullptr) ? nullptr : bbTargetEdge->getDestinationBlock();
}
// Return the BBJ_COND true target; it might be null. Only used during dumping.
BasicBlock* GetTrueTargetRaw() const
{
assert(KindIs(BBJ_COND));
- return bbTrueTarget;
+ return (bbTrueEdge == nullptr) ? nullptr : bbTrueEdge->getDestinationBlock();
}
// Return the BBJ_COND false target; it might be null. Only used during dumping.
BasicBlock* GetFalseTargetRaw() const
{
assert(KindIs(BBJ_COND));
- return bbFalseTarget;
+ return (bbFalseEdge == nullptr) ? nullptr : bbFalseEdge->getDestinationBlock();
}
#endif // DEBUG
@@ -1087,7 +1312,11 @@ public:
unsigned NumSucc() const;
unsigned NumSucc(Compiler* comp);
- // GetSucc: Returns the "i"th successor. Requires (0 <= i < NumSucc()).
+ // GetSuccEdge: Returns the "i"th successor edge. Requires (0 <= i < NumSucc()).
+ FlowEdge* GetSuccEdge(unsigned i) const;
+ FlowEdge* GetSuccEdge(unsigned i, Compiler* comp);
+
+ // GetSucc: Returns the "i"th successor block. Requires (0 <= i < NumSucc()).
BasicBlock* GetSucc(unsigned i) const;
BasicBlock* GetSucc(unsigned i, Compiler* comp);
@@ -1566,37 +1795,64 @@ public:
bool HasPotentialEHSuccs(Compiler* comp);
- // BBSuccList: adapter class for forward iteration of block successors, using range-based `for`,
- // normally used via BasicBlock::Succs(), e.g.:
- // for (BasicBlock* const target : block->Succs()) ...
+    // Base class for the successor block/edge list adapters below.
//
- class BBSuccList
+ class SuccList
{
+ protected:
// For one or two successors, pre-compute and stash the successors inline, in m_succs[], so we don't
// need to call a function or execute another `switch` to get them. Also, pre-compute the begin and end
// points of the iteration, for use by BBArrayIterator. `m_begin` and `m_end` will either point at
// `m_succs` or at the switch table successor array.
- BasicBlock* m_succs[2];
-
- // Quirk: Some BasicBlock kinds refer to their successors with BasicBlock pointers,
- // while others use FlowEdge pointers. Eventually, every type will use FlowEdge pointers.
- // For now, support iterating with both types.
- union {
- BasicBlock* const* m_begin;
- FlowEdge* const* m_beginEdge;
- };
+ FlowEdge* m_succs[2];
+ FlowEdge* const* m_begin;
+ FlowEdge* const* m_end;
- union {
- BasicBlock* const* m_end;
- FlowEdge* const* m_endEdge;
- };
+ SuccList(const BasicBlock* block);
+ };
- bool iterateEdges;
+ // BBSuccList: adapter class for forward iteration of block successors, using range-based `for`,
+ // normally used via BasicBlock::Succs(), e.g.:
+ // for (BasicBlock* const target : block->Succs()) ...
+ //
+ class BBSuccList : private SuccList
+ {
+ public:
+ BBSuccList(const BasicBlock* block) : SuccList(block)
+ {
+ }
+ BBArrayIterator begin() const
+ {
+ return BBArrayIterator(m_begin);
+ }
+
+ BBArrayIterator end() const
+ {
+ return BBArrayIterator(m_end);
+ }
+ };
+
+    // BBSuccEdgeList: adapter class for forward iteration of block successor edges, using range-based `for`,
+ // normally used via BasicBlock::SuccEdges(), e.g.:
+ // for (FlowEdge* const succEdge : block->SuccEdges()) ...
+ //
+ class BBSuccEdgeList : private SuccList
+ {
public:
- BBSuccList(const BasicBlock* block);
- BBArrayIterator begin() const;
- BBArrayIterator end() const;
+ BBSuccEdgeList(const BasicBlock* block) : SuccList(block)
+ {
+ }
+
+ FlowEdgeArrayIterator begin() const
+ {
+ return FlowEdgeArrayIterator(m_begin);
+ }
+
+ FlowEdgeArrayIterator end() const
+ {
+ return FlowEdgeArrayIterator(m_end);
+ }
};
// BBCompilerSuccList: adapter class for forward iteration of block successors, using range-based `for`,
@@ -1610,7 +1866,7 @@ public:
Compiler* m_comp;
BasicBlock* m_block;
- // iterator: forward iterator for an array of BasicBlock*, such as the BBswtDesc->bbsDstTab.
+        // iterator: forward iterator over a block's successors (BasicBlock*), by successor index
//
class iterator
{
@@ -1660,6 +1916,67 @@ public:
}
};
+    // BBCompilerSuccEdgeList: adapter class for forward iteration of block successor edges, using range-based `for`,
+ // normally used via BasicBlock::SuccEdges(), e.g.:
+ // for (FlowEdge* const succEdge : block->SuccEdges(compiler)) ...
+ //
+ // This version uses NumSucc(Compiler*)/GetSucc(Compiler*). See the documentation there for the explanation
+ // of the implications of this versus the version that does not take `Compiler*`.
+ class BBCompilerSuccEdgeList
+ {
+ Compiler* m_comp;
+ BasicBlock* m_block;
+
+        // iterator: forward iterator over a block's successor edges (FlowEdge*), by successor index
+ //
+ class iterator
+ {
+ Compiler* m_comp;
+ BasicBlock* m_block;
+ unsigned m_succNum;
+
+ public:
+ iterator(Compiler* comp, BasicBlock* block, unsigned succNum)
+ : m_comp(comp), m_block(block), m_succNum(succNum)
+ {
+ }
+
+ FlowEdge* operator*() const
+ {
+ assert(m_block != nullptr);
+ FlowEdge* succEdge = m_block->GetSuccEdge(m_succNum, m_comp);
+ assert(succEdge != nullptr);
+ return succEdge;
+ }
+
+ iterator& operator++()
+ {
+ ++m_succNum;
+ return *this;
+ }
+
+ bool operator!=(const iterator& i) const
+ {
+ return m_succNum != i.m_succNum;
+ }
+ };
+
+ public:
+ BBCompilerSuccEdgeList(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block)
+ {
+ }
+
+ iterator begin() const
+ {
+ return iterator(m_comp, m_block, 0);
+ }
+
+ iterator end() const
+ {
+ return iterator(m_comp, m_block, m_block->NumSucc(m_comp));
+ }
+ };
+
// Succs: convenience methods for enabling range-based `for` iteration over a block's successors, e.g.:
// for (BasicBlock* const succ : block->Succs()) ...
//
@@ -1676,6 +1993,16 @@ public:
return BBCompilerSuccList(comp, this);
}
+ BBSuccEdgeList SuccEdges()
+ {
+ return BBSuccEdgeList(this);
+ }
+
+ BBCompilerSuccEdgeList SuccEdges(Compiler* comp)
+ {
+ return BBCompilerSuccEdgeList(comp, this);
+ }
+
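    // Example: summing outgoing likelihoods with the new edge iteration
    // (illustrative only):
    //   weight_t total = 0;
    //   for (FlowEdge* const succEdge : block->SuccEdges())
    //   {
    //       if (succEdge->hasLikelihood())
    //       {
    //           total += succEdge->getLikelihood();
    //       }
    //   }
    //   // for a fully-annotated block, `total` should be close to 1.0
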
// Clone block state and statements from `from` block to `to` block (which must be new/empty)
static void CloneBlockState(Compiler* compiler, BasicBlock* to, const BasicBlock* from);
@@ -1927,12 +2254,11 @@ inline BBArrayIterator BBEhfSuccList::end() const
return BBArrayIterator(m_bbeDesc->bbeSuccs + m_bbeDesc->bbeCount);
}
-// BBSuccList out-of-class-declaration implementations
+// SuccList out-of-class-declaration implementations
//
-inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block)
+inline BasicBlock::SuccList::SuccList(const BasicBlock* block)
{
assert(block != nullptr);
- iterateEdges = false;
switch (block->bbKind)
{
@@ -1950,24 +2276,24 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block)
case BBJ_EHCATCHRET:
case BBJ_EHFILTERRET:
case BBJ_LEAVE:
- m_succs[0] = block->bbTarget;
+ m_succs[0] = block->GetTargetEdge();
m_begin = &m_succs[0];
m_end = &m_succs[1];
break;
case BBJ_COND:
- m_succs[0] = block->bbFalseTarget;
+ m_succs[0] = block->GetFalseEdge();
m_begin = &m_succs[0];
// If both fall-through and branch successors are identical, then only include
// them once in the iteration (this is the same behavior as NumSucc()/GetSucc()).
- if (block->TrueTargetIs(block->GetFalseTarget()))
+ if (block->TrueEdgeIs(block->GetFalseEdge()))
{
m_end = &m_succs[1];
}
else
{
- m_succs[1] = block->bbTrueTarget;
+ m_succs[1] = block->GetTrueEdge();
m_end = &m_succs[2];
}
break;
@@ -1978,26 +2304,22 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block)
// been computed.
if (block->GetEhfTargets() == nullptr)
{
- m_beginEdge = nullptr;
- m_endEdge = nullptr;
+ m_begin = nullptr;
+ m_end = nullptr;
}
else
{
- m_beginEdge = block->GetEhfTargets()->bbeSuccs;
- m_endEdge = block->GetEhfTargets()->bbeSuccs + block->GetEhfTargets()->bbeCount;
+ m_begin = block->GetEhfTargets()->bbeSuccs;
+ m_end = block->GetEhfTargets()->bbeSuccs + block->GetEhfTargets()->bbeCount;
}
-
- iterateEdges = true;
break;
case BBJ_SWITCH:
// We don't use the m_succs in-line data for switches; use the existing jump table in the block.
assert(block->bbSwtTargets != nullptr);
assert(block->bbSwtTargets->bbsDstTab != nullptr);
- m_beginEdge = block->bbSwtTargets->bbsDstTab;
- m_endEdge = block->bbSwtTargets->bbsDstTab + block->bbSwtTargets->bbsCount;
-
- iterateEdges = true;
+ m_begin = block->bbSwtTargets->bbsDstTab;
+ m_end = block->bbSwtTargets->bbsDstTab + block->bbSwtTargets->bbsCount;
break;
default:
@@ -2007,16 +2329,6 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block)
assert(m_end >= m_begin);
}
-inline BBArrayIterator BasicBlock::BBSuccList::begin() const
-{
- return (iterateEdges ? BBArrayIterator(m_beginEdge) : BBArrayIterator(m_begin));
-}
-
-inline BBArrayIterator BasicBlock::BBSuccList::end() const
-{
- return (iterateEdges ? BBArrayIterator(m_endEdge) : BBArrayIterator(m_end));
-}
-
// We have a simpler struct, BasicBlockList, which is simply a singly-linked
// list of blocks.
@@ -2034,206 +2346,23 @@ struct BasicBlockList
}
};
-//-------------------------------------------------------------------------
-// FlowEdge -- control flow edge
-//
-// In compiler terminology the control flow between two BasicBlocks
-// is typically referred to as an "edge". Most well known are the
-// backward branches for loops, which are often called "back-edges".
-//
-// "struct FlowEdge" is the type that represents our control flow edges.
-// This type is a linked list of zero or more "edges".
-// (The list of zero edges is represented by NULL.)
-// Every BasicBlock has a field called bbPreds of this type. This field
-// represents the list of "edges" that flow into this BasicBlock.
-// The FlowEdge type only stores the BasicBlock* of the source for the
-// control flow edge. The destination block for the control flow edge
-// is implied to be the block which contained the bbPreds field.
-//
-// For a switch branch target there may be multiple "edges" that have
-// the same source block (and destination block). We need to count the
-// number of these edges so that during optimization we will know when
-// we have zero of them. Rather than have extra FlowEdge entries we
-// track this via the DupCount property.
-//
-// When we have Profile weight for the BasicBlocks we can usually compute
-// the number of times each edge was executed by examining the adjacent
-// BasicBlock weights. As we are doing for BasicBlocks, we call the number
-// of times that a control flow edge was executed the "edge weight".
-// In order to compute the edge weights we need to use a bounded range
-// for every edge weight. These two fields, 'flEdgeWeightMin' and 'flEdgeWeightMax'
-// are used to hold a bounded range. Most often these will converge such
-// that both values are the same and that value is the exact edge weight.
-// Sometimes we are left with a rage of possible values between [Min..Max]
-// which represents an inexact edge weight.
-//
-// The bbPreds list is initially created by Compiler::fgLinkBasicBlocks()
-// and is incrementally kept up to date.
-//
-// The edge weight are computed by Compiler::fgComputeEdgeWeights()
-// the edge weights are used to straighten conditional branches
-// by Compiler::fgReorderBlocks()
-//
-struct FlowEdge
-{
-private:
- // The next predecessor edge in the list, nullptr for end of list.
- FlowEdge* m_nextPredEdge;
-
- // The source of the control flow
- BasicBlock* m_sourceBlock;
-
- // The destination of the control flow
- BasicBlock* m_destBlock;
-
- // Edge weights
- weight_t m_edgeWeightMin;
- weight_t m_edgeWeightMax;
-
- // Likelihood that m_sourceBlock transfers control along this edge.
- // Values in range [0..1]
- weight_t m_likelihood;
-
- // The count of duplicate "edges" (used for switch stmts or degenerate branches)
- unsigned m_dupCount;
-
- // True if likelihood has been set
- bool m_likelihoodSet;
-
-public:
- FlowEdge(BasicBlock* sourceBlock, BasicBlock* destBlock, FlowEdge* rest)
- : m_nextPredEdge(rest)
- , m_sourceBlock(sourceBlock)
- , m_destBlock(destBlock)
- , m_edgeWeightMin(0)
- , m_edgeWeightMax(0)
- , m_likelihood(0)
- , m_dupCount(0)
- , m_likelihoodSet(false)
- {
- }
-
- FlowEdge* getNextPredEdge() const
- {
- return m_nextPredEdge;
- }
-
- FlowEdge** getNextPredEdgeRef()
- {
- return &m_nextPredEdge;
- }
-
- void setNextPredEdge(FlowEdge* newEdge)
- {
- m_nextPredEdge = newEdge;
- }
-
- BasicBlock* getSourceBlock() const
- {
- assert(m_sourceBlock != nullptr);
- return m_sourceBlock;
- }
-
- void setSourceBlock(BasicBlock* newBlock)
- {
- assert(newBlock != nullptr);
- m_sourceBlock = newBlock;
- }
-
- BasicBlock* getDestinationBlock() const
- {
- assert(m_destBlock != nullptr);
- return m_destBlock;
- }
-
- void setDestinationBlock(BasicBlock* newBlock)
- {
- assert(newBlock != nullptr);
- m_destBlock = newBlock;
- }
-
- weight_t edgeWeightMin() const
- {
- return m_edgeWeightMin;
- }
-
- weight_t edgeWeightMax() const
- {
- return m_edgeWeightMax;
- }
-
- // These two methods are used to set new values for edge weights.
- // They return false if the newWeight is not between the current [min..max]
- // when slop is non-zero we allow for the case where our weights might be off by 'slop'
- //
- bool setEdgeWeightMinChecked(weight_t newWeight, BasicBlock* bDst, weight_t slop, bool* wbUsedSlop);
- bool setEdgeWeightMaxChecked(weight_t newWeight, BasicBlock* bDst, weight_t slop, bool* wbUsedSlop);
- void setEdgeWeights(weight_t newMinWeight, weight_t newMaxWeight, BasicBlock* bDst);
-
- weight_t getLikelihood() const
- {
- return m_likelihood;
- }
-
- void setLikelihood(weight_t likelihood)
- {
- assert(likelihood >= 0.0);
- assert(likelihood <= 1.0);
- m_likelihoodSet = true;
- m_likelihood = likelihood;
- }
+// FlowEdge implementations (that are required to be defined after the declaration of BasicBlock)
- void clearLikelihood()
- {
- m_likelihood = 0.0;
- m_likelihoodSet = false;
- }
-
- bool hasLikelihood() const
- {
- return m_likelihoodSet;
- }
-
- weight_t getLikelyWeight() const
- {
- assert(m_likelihoodSet);
- return m_likelihood * m_sourceBlock->bbWeight;
- }
-
- unsigned getDupCount() const
- {
- return m_dupCount;
- }
-
- void incrementDupCount()
- {
- m_dupCount++;
- }
-
- void decrementDupCount()
- {
- assert(m_dupCount >= 1);
- m_dupCount--;
- }
-};
+inline weight_t FlowEdge::getLikelyWeight() const
+{
+ assert(m_likelihoodSet);
+ return m_likelihood * m_sourceBlock->bbWeight;
+}
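
// For example, with a likelihood of 0.25 and a source block weight of 100,
// getLikelyWeight() returns 25: the expected number of times control flows
// along this edge.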
// BasicBlock iterator implementations (that are required to be defined after the declaration of FlowEdge)
inline BasicBlock* BBArrayIterator::operator*() const
{
- if (iterateEdges)
- {
- assert(m_edgeEntry != nullptr);
- FlowEdge* edgeTarget = *m_edgeEntry;
- assert(edgeTarget != nullptr);
- assert(edgeTarget->getDestinationBlock() != nullptr);
- return edgeTarget->getDestinationBlock();
- }
-
- assert(m_bbEntry != nullptr);
- BasicBlock* bTarget = *m_bbEntry;
- assert(bTarget != nullptr);
- return bTarget;
+ assert(m_edgeEntry != nullptr);
+ FlowEdge* edgeTarget = *m_edgeEntry;
+ assert(edgeTarget != nullptr);
+ assert(edgeTarget->getDestinationBlock() != nullptr);
+ return edgeTarget->getDestinationBlock();
}
// Pred list iterator implementations (that are required to be defined after the declaration of BasicBlock and FlowEdge)
diff --git a/src/coreclr/jit/clrjit.natvis b/src/coreclr/jit/clrjit.natvis
index 95dd3dc30568..e8ac7e8f7c4a 100644
--- a/src/coreclr/jit/clrjit.natvis
+++ b/src/coreclr/jit/clrjit.natvis
@@ -21,7 +21,7 @@ Documentation for VS debugger format specifiers: https://docs.microsoft.com/en-u
</Type>
<Type Name="BasicBlock">
- <DisplayString Condition="bbKind==BBJ_COND || bbKind==BBJ_ALWAYS || bbKind==BBJ_LEAVE || bbKind==BBJ_EHCATCHRET || bbKind==BBJ_CALLFINALLY || bbKind==BBJ_CALLFINALLYRET || bbKind==BBJ_EHFILTERRET">BB{bbNum,d}->BB{bbTarget->bbNum,d}; {bbKind,en}</DisplayString>
+ <DisplayString Condition="bbKind==BBJ_COND || bbKind==BBJ_ALWAYS || bbKind==BBJ_LEAVE || bbKind==BBJ_EHCATCHRET || bbKind==BBJ_CALLFINALLY || bbKind==BBJ_CALLFINALLYRET || bbKind==BBJ_EHFILTERRET">BB{bbNum,d}->BB{bbTargetEdge->m_destBlock->bbNum,d}; {bbKind,en}</DisplayString>
<DisplayString Condition="bbKind==BBJ_SWITCH">BB{bbNum,d}; {bbKind,en}; {bbSwtTargets->bbsCount} cases</DisplayString>
<DisplayString Condition="bbKind==BBJ_EHFINALLYRET">BB{bbNum,d}; {bbKind,en}; {bbEhfTargets->bbeCount} succs</DisplayString>
<DisplayString>BB{bbNum,d}; {bbKind,en}</DisplayString>
@@ -86,6 +86,11 @@ Documentation for VS debugger format specifiers: https://docs.microsoft.com/en-u
<DisplayString>{gtTreeID, d}: [{gtOper,en}, {gtType,en} V{((GenTreeLclFld*)this)-&gt;_gtLclNum,u}[+{((GenTreeLclFld*)this)-&gt;m_lclOffs,u}]]</DisplayString>
</Type>
+ <!-- Scalar evolution -->
+ <Type Name="Scev">
+ <DisplayString>[{Oper,en}, {Type,en}]</DisplayString>
+ </Type>
+
<!-- Register allocation -->
<Type Name="LinearScan">
<DisplayString>LinearScan</DisplayString>
diff --git a/src/coreclr/jit/codegen.h b/src/coreclr/jit/codegen.h
index c36a6776a8cd..7a2359c9fb5f 100644
--- a/src/coreclr/jit/codegen.h
+++ b/src/coreclr/jit/codegen.h
@@ -1183,6 +1183,9 @@ protected:
void genCodeForCpBlkRepMovs(GenTreeBlk* cpBlkNode);
void genCodeForCpBlkUnroll(GenTreeBlk* cpBlkNode);
void genCodeForPhysReg(GenTreePhysReg* tree);
+#ifdef SWIFT_SUPPORT
+ void genCodeForSwiftErrorReg(GenTree* tree);
+#endif // SWIFT_SUPPORT
void genCodeForNullCheck(GenTreeIndir* tree);
void genCodeForCmpXchg(GenTreeCmpXchg* tree);
void genCodeForReuseVal(GenTree* treeNode);
@@ -1257,6 +1260,7 @@ protected:
void genCodeForInitBlkLoop(GenTreeBlk* initBlkNode);
void genCodeForInitBlkRepStos(GenTreeBlk* initBlkNode);
void genCodeForInitBlkUnroll(GenTreeBlk* initBlkNode);
+ unsigned genEmitJumpTable(GenTree* treeNode, bool relativeAddr);
void genJumpTable(GenTree* tree);
void genTableBasedSwitch(GenTree* tree);
#if defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp
index eefee0e6912c..8cf3ac32b3a3 100644
--- a/src/coreclr/jit/codegenarm.cpp
+++ b/src/coreclr/jit/codegenarm.cpp
@@ -647,30 +647,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode)
//
void CodeGen::genJumpTable(GenTree* treeNode)
{
- noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH));
- assert(treeNode->OperGet() == GT_JMPTABLE);
-
- unsigned jumpCount = compiler->compCurBB->GetSwitchTargets()->bbsCount;
- FlowEdge** jumpTable = compiler->compCurBB->GetSwitchTargets()->bbsDstTab;
- unsigned jmpTabBase;
-
- jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, false);
-
- JITDUMP("\n J_M%03u_DS%02u LABEL DWORD\n", compiler->compMethodID, jmpTabBase);
-
- for (unsigned i = 0; i < jumpCount; i++)
- {
- BasicBlock* target = (*jumpTable)->getDestinationBlock();
- jumpTable++;
- noway_assert(target->HasFlag(BBF_HAS_LABEL));
-
- JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum);
-
- GetEmitter()->emitDataGenData(i, target);
- }
-
- GetEmitter()->emitDataGenEnd();
-
+ unsigned jmpTabBase = genEmitJumpTable(treeNode, false);
genMov32RelocatableDataLabel(jmpTabBase, treeNode->GetRegNum());
genProduceReg(treeNode);
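
The shared helper factors out the two deleted per-target loops; a plausible shape, reconstructed from those bodies (the actual definition lives elsewhere in this patch, so treat this as a sketch):

    unsigned CodeGen::genEmitJumpTable(GenTree* treeNode, bool relativeAddr)
    {
        noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH));
        assert(treeNode->OperGet() == GT_JMPTABLE);

        unsigned   jumpCount  = compiler->compCurBB->GetSwitchTargets()->bbsCount;
        FlowEdge** jumpTable  = compiler->compCurBB->GetSwitchTargets()->bbsDstTab;
        unsigned   jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, relativeAddr);

        JITDUMP("\n      J_M%03u_DS%02u LABEL   DWORD\n", compiler->compMethodID, jmpTabBase);

        for (unsigned i = 0; i < jumpCount; i++)
        {
            // Each table entry records the destination block of the i-th switch edge.
            BasicBlock* target = (*jumpTable++)->getDestinationBlock();
            noway_assert(target->HasFlag(BBF_HAS_LABEL));

            JITDUMP("            DD      L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum);

            GetEmitter()->emitDataGenData(i, target);
        }

        GetEmitter()->emitDataGenEnd();
        return jmpTabBase;
    }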
diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp
index 4587bace1697..498f227e48d5 100644
--- a/src/coreclr/jit/codegenarm64.cpp
+++ b/src/coreclr/jit/codegenarm64.cpp
@@ -3750,33 +3750,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode)
// emits the table and an instruction to get the address of the first element
void CodeGen::genJumpTable(GenTree* treeNode)
{
- noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH));
- assert(treeNode->OperGet() == GT_JMPTABLE);
-
- unsigned jumpCount = compiler->compCurBB->GetSwitchTargets()->bbsCount;
- FlowEdge** jumpTable = compiler->compCurBB->GetSwitchTargets()->bbsDstTab;
- unsigned jmpTabOffs;
- unsigned jmpTabBase;
-
- jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, true);
-
- jmpTabOffs = 0;
-
- JITDUMP("\n J_M%03u_DS%02u LABEL DWORD\n", compiler->compMethodID, jmpTabBase);
-
- for (unsigned i = 0; i < jumpCount; i++)
- {
- BasicBlock* target = (*jumpTable)->getDestinationBlock();
- jumpTable++;
- noway_assert(target->HasFlag(BBF_HAS_LABEL));
-
- JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum);
-
- GetEmitter()->emitDataGenData(i, target);
- };
-
- GetEmitter()->emitDataGenEnd();
-
+ unsigned jmpTabBase = genEmitJumpTable(treeNode, true);
// Access to inline data is 'abstracted' by a special type of static member
// (produced by eeFindJitDataOffs) which the emitter recognizes as being a reference
// to constant data, not a real static field.
diff --git a/src/coreclr/jit/codegenarm64test.cpp b/src/coreclr/jit/codegenarm64test.cpp
index bf37570d597e..90aafb0e4b37 100644
--- a/src/coreclr/jit/codegenarm64test.cpp
+++ b/src/coreclr/jit/codegenarm64test.cpp
@@ -4581,6 +4581,14 @@ void CodeGen::genArm64EmitterUnitTestsSve()
theEmitter->emitIns_R_R_R(INS_sve_subr, EA_SCALABLE, REG_V2, REG_P0, REG_V13,
INS_OPTS_SCALABLE_S); // SUBR <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+#ifdef ALL_ARM64_EMITTER_UNIT_TESTS_SVE_UNSUPPORTED
+ // IF_SVE_AB_3B
+ theEmitter->emitIns_R_R_R(INS_sve_addpt, EA_SCALABLE, REG_V0, REG_P1, REG_V2,
+ INS_OPTS_SCALABLE_D); // ADDPT <Zdn>.D, <Pg>/M, <Zdn>.D, <Zm>.D
+ theEmitter->emitIns_R_R_R(INS_sve_subpt, EA_SCALABLE, REG_V0, REG_P1, REG_V2,
+ INS_OPTS_SCALABLE_D); // SUBPT <Zdn>.D, <Pg>/M, <Zdn>.D, <Zm>.D
+#endif // ALL_ARM64_EMITTER_UNIT_TESTS_SVE_UNSUPPORTED
+
// IF_SVE_AC_3A
theEmitter->emitIns_R_R_R(INS_sve_sdiv, EA_SCALABLE, REG_V3, REG_P2, REG_V9,
INS_OPTS_SCALABLE_S); // SDIV <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
@@ -4725,6 +4733,54 @@ void CodeGen::genArm64EmitterUnitTestsSve()
theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_P0, REG_V0, INS_OPTS_SCALABLE_S,
INS_SCALABLE_OPTS_WIDE); // LSR <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.D
+ // IF_SVE_CE_2A
+ theEmitter->emitIns_R_R(INS_sve_pmov, EA_SCALABLE, REG_P2, REG_V12, INS_OPTS_SCALABLE_B,
+ INS_SCALABLE_OPTS_TO_PREDICATE); // PMOV <Pd>.B, <Zn>
+ theEmitter->emitIns_R_R(INS_sve_pmov, EA_SCALABLE, REG_P7, REG_V2, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_TO_PREDICATE); // PMOV <Pd>.H, <Zn>[0]
+
+ // IF_SVE_CE_2B
+ theEmitter->emitIns_R_R_I(INS_sve_pmov, EA_SCALABLE, REG_P15, REG_V7, 7, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_TO_PREDICATE); // PMOV <Pd>.D, <Zn>[<imm>]
+ theEmitter->emitIns_R_R_I(INS_sve_pmov, EA_SCALABLE, REG_P7, REG_V16, 0, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_TO_PREDICATE); // PMOV <Pd>.D, <Zn>[<imm>]
+
+ // IF_SVE_CE_2C
+ theEmitter->emitIns_R_R_I(INS_sve_pmov, EA_SCALABLE, REG_P0, REG_V31, 1, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_TO_PREDICATE); // PMOV <Pd>.H, <Zn>[<imm>]
+ theEmitter->emitIns_R_R_I(INS_sve_pmov, EA_SCALABLE, REG_V1, REG_P1, 0, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_TO_PREDICATE); // PMOV <Pd>.H, <Zn>[<imm>]
+
+ // IF_SVE_CE_2D
+ theEmitter->emitIns_R_R_I(INS_sve_pmov, EA_SCALABLE, REG_P3, REG_V9, 3, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_TO_PREDICATE); // PMOV <Pd>.S, <Zn>[<imm>]
+ theEmitter->emitIns_R_R_I(INS_sve_pmov, EA_SCALABLE, REG_P10, REG_V4, 0, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_TO_PREDICATE); // PMOV <Pd>.S, <Zn>[<imm>]
+
+ // IF_SVE_CF_2A
+ theEmitter->emitIns_R_R(INS_sve_pmov, EA_SCALABLE, REG_V11, REG_P12, INS_OPTS_SCALABLE_B,
+ INS_SCALABLE_OPTS_TO_VECTOR); // PMOV <Zd>, <Pn>.B
+ theEmitter->emitIns_R_R(INS_sve_pmov, EA_SCALABLE, REG_V2, REG_P7, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_TO_VECTOR); // PMOV <Zd>[0], <Pn>.S
+
+ // IF_SVE_CF_2B
+ theEmitter->emitIns_R_R_I(INS_sve_pmov, EA_SCALABLE, REG_V6, REG_P8, 7, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_TO_VECTOR); // PMOV <Zd>[<imm>], <Pn>.D
+ theEmitter->emitIns_R_R_I(INS_sve_pmov, EA_SCALABLE, REG_V9, REG_P7, 0, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_TO_VECTOR); // PMOV <Zd>[<imm>], <Pn>.D
+
+ // IF_SVE_CF_2C
+ theEmitter->emitIns_R_R_I(INS_sve_pmov, EA_SCALABLE, REG_V8, REG_P4, 1, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_TO_VECTOR); // PMOV <Zd>[<imm>], <Pn>.H
+ theEmitter->emitIns_R_R_I(INS_sve_pmov, EA_SCALABLE, REG_V5, REG_P9, 0, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_TO_VECTOR); // PMOV <Zd>[<imm>], <Pn>.H
+
+ // IF_SVE_CF_2D
+ theEmitter->emitIns_R_R_I(INS_sve_pmov, EA_SCALABLE, REG_V14, REG_P2, 3, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_TO_VECTOR); // PMOV <Zd>[<imm>], <Pn>.S
+ theEmitter->emitIns_R_R_I(INS_sve_pmov, EA_SCALABLE, REG_V3, REG_P15, 0, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_TO_VECTOR); // PMOV <Zd>[<imm>], <Pn>.S
+
// IF_SVE_CJ_2A
theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P1, REG_P2,
INS_OPTS_SCALABLE_B); // REV <Pd>.<T>, <Pn>.<T>
@@ -5101,6 +5157,22 @@ void CodeGen::genArm64EmitterUnitTestsSve()
theEmitter->emitIns_R_R_R(INS_sve_fsubr, EA_SCALABLE, REG_V6, REG_P4, REG_V29,
INS_OPTS_SCALABLE_D); // FSUBR <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+ // IF_SVE_HL_3B
+ theEmitter->emitIns_R_R_R(INS_sve_bfadd, EA_SCALABLE, REG_V0, REG_P0, REG_V1,
+ INS_OPTS_SCALABLE_H); // BFADD <Zdn>.H, <Pg>/M, <Zdn>.H, <Zm>.H
+ theEmitter->emitIns_R_R_R(INS_sve_bfmax, EA_SCALABLE, REG_V2, REG_P1, REG_V3,
+ INS_OPTS_SCALABLE_H); // BFMAX <Zdn>.H, <Pg>/M, <Zdn>.H, <Zm>.H
+ theEmitter->emitIns_R_R_R(INS_sve_bfmaxnm, EA_SCALABLE, REG_V4, REG_P2, REG_V5,
+ INS_OPTS_SCALABLE_H); // BFMAXNM <Zdn>.H, <Pg>/M, <Zdn>.H, <Zm>.H
+ theEmitter->emitIns_R_R_R(INS_sve_bfmin, EA_SCALABLE, REG_V6, REG_P3, REG_V7,
+ INS_OPTS_SCALABLE_H); // BFMIN <Zdn>.H, <Pg>/M, <Zdn>.H, <Zm>.H
+ theEmitter->emitIns_R_R_R(INS_sve_bfminnm, EA_SCALABLE, REG_V8, REG_P4, REG_V9,
+ INS_OPTS_SCALABLE_H); // BFMINNM <Zdn>.H, <Pg>/M, <Zdn>.H, <Zm>.H
+ theEmitter->emitIns_R_R_R(INS_sve_bfmul, EA_SCALABLE, REG_V10, REG_P5, REG_V11,
+ INS_OPTS_SCALABLE_H); // BFMUL <Zdn>.H, <Pg>/M, <Zdn>.H, <Zm>.H
+ theEmitter->emitIns_R_R_R(INS_sve_bfsub, EA_SCALABLE, REG_V12, REG_P6, REG_V13,
+ INS_OPTS_SCALABLE_H); // BFSUB <Zdn>.H, <Pg>/M, <Zdn>.H, <Zm>.H
+
// IF_SVE_HT_4A
theEmitter->emitIns_R_R_R_R(INS_sve_facge, EA_SCALABLE, REG_P0, REG_P0, REG_V10, REG_V31,
INS_OPTS_SCALABLE_H); // FACGE <Pd>.<T>, <Pg>/Z, <Zn>.<T>, <Zm>.<T>
@@ -5125,6 +5197,16 @@ void CodeGen::genArm64EmitterUnitTestsSve()
theEmitter->emitIns_R_R_R_R(INS_sve_fcmuo, EA_SCALABLE, REG_P5, REG_P2, REG_V31, REG_V20,
INS_OPTS_SCALABLE_S); // FCMUO <Pd>.<T>, <Pg>/Z, <Zn>.<T>, <Zm>.<T>
+ // IF_SVE_HU_4A
+ theEmitter->emitIns_R_R_R_R(INS_sve_fmla, EA_SCALABLE, REG_V0, REG_P0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_H); // FMLA <Zda>.<T>, <Pg>/M, <Zn>.<T>, <Zm>.<T>
+ theEmitter->emitIns_R_R_R_R(INS_sve_fmls, EA_SCALABLE, REG_V3, REG_P2, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_S); // FMLS <Zda>.<T>, <Pg>/M, <Zn>.<T>, <Zm>.<T>
+ theEmitter->emitIns_R_R_R_R(INS_sve_fnmla, EA_SCALABLE, REG_V6, REG_P4, REG_V7, REG_V8,
+ INS_OPTS_SCALABLE_D); // FNMLA <Zda>.<T>, <Pg>/M, <Zn>.<T>, <Zm>.<T>
+ theEmitter->emitIns_R_R_R_R(INS_sve_fnmls, EA_SCALABLE, REG_V9, REG_P6, REG_V10, REG_V11,
+ INS_OPTS_SCALABLE_H); // FNMLS <Zda>.<T>, <Pg>/M, <Zn>.<T>, <Zm>.<T>
+
// IF_SVE_AF_3A
theEmitter->emitIns_R_R_R(INS_sve_andv, EA_1BYTE, REG_V0, REG_P0, REG_V0,
INS_OPTS_SCALABLE_B); // ANDV <V><d>, <Pg>, <Zn>.<T>
@@ -5269,6 +5351,10 @@ void CodeGen::genArm64EmitterUnitTestsSve()
theEmitter->emitIns_R_R_R(INS_sve_umulh, EA_SCALABLE, REG_V31, REG_V5, REG_V0, INS_OPTS_SCALABLE_D,
INS_SCALABLE_OPTS_UNPREDICATED); // UMULH <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ // IF_SVE_BD_3B
+ theEmitter->emitIns_R_R_R(INS_sve_pmul, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_B); // PMUL <Zd>.B, <Zn>.B, <Zm>.B
+
// IF_SVE_BE_3A
theEmitter->emitIns_R_R_R(INS_sve_sqdmulh, EA_SCALABLE, REG_V7, REG_V28, REG_V0,
INS_OPTS_SCALABLE_B); // SQDMULH <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
@@ -5283,6 +5369,24 @@ void CodeGen::genArm64EmitterUnitTestsSve()
theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V29, REG_V10, REG_V22, INS_OPTS_SCALABLE_S,
INS_SCALABLE_OPTS_UNPREDICATED_WIDE); // LSR <Zd>.<T>, <Zn>.<T>, <Zm>.D
+ // IF_SVE_BH_3A
+ theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V4, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_LSL_N); // ADR <Zd>.<T>, [<Zn>.<T>, <Zm>.<T>{, <mod><amount>}]
+ theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V29, REG_V1, REG_V10, 1, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_LSL_N); // ADR <Zd>.<T>, [<Zn>.<T>, <Zm>.<T>{, <mod><amount>}]
+
+ // IF_SVE_BH_3B
+ theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V7, REG_V9, 0,
+ INS_OPTS_SCALABLE_D_SXTW); // ADR <Zd>.D, [<Zn>.D, <Zm>.D, SXTW{<amount>}]
+ theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V12, REG_V3, REG_V5, 2,
+ INS_OPTS_SCALABLE_D_SXTW); // ADR <Zd>.D, [<Zn>.D, <Zm>.D, SXTW{<amount>}]
+
+ // IF_SVE_BH_3B_A
+ theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V10, REG_V14, 0,
+ INS_OPTS_SCALABLE_D_UXTW); // ADR <Zd>.D, [<Zn>.D, <Zm>.D, UXTW{<amount>}]
+ theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V3, REG_V15, REG_V11, 3,
+ INS_OPTS_SCALABLE_D_UXTW); // ADR <Zd>.D, [<Zn>.D, <Zm>.D, UXTW{<amount>}]
+
// IF_SVE_BK_3A
theEmitter->emitIns_R_R_R(INS_sve_ftssel, EA_SCALABLE, REG_V17, REG_V16, REG_V15,
INS_OPTS_SCALABLE_D); // FTSSEL <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
@@ -5435,6 +5539,235 @@ void CodeGen::genArm64EmitterUnitTestsSve()
theEmitter->emitIns_R_R_R(INS_sve_zipq2, EA_SCALABLE, REG_V12, REG_V13, REG_V14,
INS_OPTS_SCALABLE_B); // ZIPQ2 <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ // IF_SVE_FL_3A
+ theEmitter->emitIns_R_R_R(INS_sve_sabdlb, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_H); // SABDLB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_sabdlt, EA_SCALABLE, REG_V3, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_S); // SABDLT <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_saddlb, EA_SCALABLE, REG_V6, REG_V7, REG_V8,
+ INS_OPTS_SCALABLE_D); // SADDLB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_saddlt, EA_SCALABLE, REG_V9, REG_V10, REG_V11,
+ INS_OPTS_SCALABLE_H); // SADDLT <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_ssublb, EA_SCALABLE, REG_V12, REG_V13, REG_V14,
+ INS_OPTS_SCALABLE_S); // SSUBLB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_ssublt, EA_SCALABLE, REG_V15, REG_V16, REG_V17,
+ INS_OPTS_SCALABLE_D); // SSUBLT <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_uabdlb, EA_SCALABLE, REG_V18, REG_V19, REG_V20,
+ INS_OPTS_SCALABLE_H); // UABDLB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_uabdlt, EA_SCALABLE, REG_V21, REG_V22, REG_V24,
+ INS_OPTS_SCALABLE_S); // UABDLT <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_uaddlb, EA_SCALABLE, REG_V24, REG_V25, REG_V26,
+ INS_OPTS_SCALABLE_D); // UADDLB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_uaddlt, EA_SCALABLE, REG_V27, REG_V28, REG_V29,
+ INS_OPTS_SCALABLE_H); // UADDLT <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_usublb, EA_SCALABLE, REG_V30, REG_V31, REG_V0,
+ INS_OPTS_SCALABLE_S); // USUBLB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_usublt, EA_SCALABLE, REG_V1, REG_V2, REG_V3,
+ INS_OPTS_SCALABLE_D); // USUBLT <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+
+ // IF_SVE_FM_3A
+ theEmitter->emitIns_R_R_R(INS_sve_saddwb, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_H); // SADDWB <Zd>.<T>, <Zn>.<T>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_saddwt, EA_SCALABLE, REG_V3, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_S); // SADDWT <Zd>.<T>, <Zn>.<T>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_ssubwb, EA_SCALABLE, REG_V6, REG_V7, REG_V8,
+ INS_OPTS_SCALABLE_D); // SSUBWB <Zd>.<T>, <Zn>.<T>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_ssubwt, EA_SCALABLE, REG_V9, REG_V10, REG_V11,
+ INS_OPTS_SCALABLE_H); // SSUBWT <Zd>.<T>, <Zn>.<T>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_uaddwb, EA_SCALABLE, REG_V12, REG_V13, REG_V14,
+ INS_OPTS_SCALABLE_S); // UADDWB <Zd>.<T>, <Zn>.<T>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_uaddwt, EA_SCALABLE, REG_V15, REG_V16, REG_V17,
+ INS_OPTS_SCALABLE_D); // UADDWT <Zd>.<T>, <Zn>.<T>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_usubwb, EA_SCALABLE, REG_V18, REG_V19, REG_V20,
+ INS_OPTS_SCALABLE_H); // USUBWB <Zd>.<T>, <Zn>.<T>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_usubwt, EA_SCALABLE, REG_V21, REG_V22, REG_V23,
+ INS_OPTS_SCALABLE_S); // USUBWT <Zd>.<T>, <Zn>.<T>, <Zm>.<Tb>
+
+ // IF_SVE_FN_3A
+ theEmitter->emitIns_R_R_R(INS_sve_pmullb, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_H); // PMULLB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_pmullt, EA_SCALABLE, REG_V3, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_D); // PMULLT <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_smullb, EA_SCALABLE, REG_V6, REG_V7, REG_V8,
+ INS_OPTS_SCALABLE_H); // SMULLB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_smullt, EA_SCALABLE, REG_V9, REG_V10, REG_V11,
+ INS_OPTS_SCALABLE_D); // SMULLT <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_sqdmullb, EA_SCALABLE, REG_V12, REG_V13, REG_V14,
+ INS_OPTS_SCALABLE_H); // SQDMULLB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_sqdmullt, EA_SCALABLE, REG_V15, REG_V16, REG_V17,
+ INS_OPTS_SCALABLE_D); // SQDMULLT <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_umullb, EA_SCALABLE, REG_V18, REG_V19, REG_V20,
+ INS_OPTS_SCALABLE_H); // UMULLB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_umullt, EA_SCALABLE, REG_V21, REG_V22, REG_V23,
+ INS_OPTS_SCALABLE_D); // UMULLT <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+
+ // IF_SVE_FN_3B
+ theEmitter->emitIns_R_R_R(INS_sve_pmullb, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_Q); // PMULLB <Zd>.Q, <Zn>.D, <Zm>.D
+ theEmitter->emitIns_R_R_R(INS_sve_pmullt, EA_SCALABLE, REG_V3, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_Q); // PMULLT <Zd>.Q, <Zn>.D, <Zm>.D
+
+ // IF_SVE_FO_3A
+ theEmitter->emitIns_R_R_R(INS_sve_smmla, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_S); // SMMLA <Zda>.S, <Zn>.B, <Zm>.B
+ theEmitter->emitIns_R_R_R(INS_sve_ummla, EA_SCALABLE, REG_V3, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_S); // UMMLA <Zda>.S, <Zn>.B, <Zm>.B
+ theEmitter->emitIns_R_R_R(INS_sve_usmmla, EA_SCALABLE, REG_V6, REG_V7, REG_V8,
+ INS_OPTS_SCALABLE_S); // USMMLA <Zda>.S, <Zn>.B, <Zm>.B
+
+ // IF_SVE_FP_3A
+ theEmitter->emitIns_R_R_R(INS_sve_eorbt, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_B); // EORBT <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ theEmitter->emitIns_R_R_R(INS_sve_eorbt, EA_SCALABLE, REG_V3, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_H); // EORBT <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ theEmitter->emitIns_R_R_R(INS_sve_eortb, EA_SCALABLE, REG_V6, REG_V7, REG_V8,
+ INS_OPTS_SCALABLE_S); // EORTB <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ theEmitter->emitIns_R_R_R(INS_sve_eortb, EA_SCALABLE, REG_V9, REG_V10, REG_V11,
+ INS_OPTS_SCALABLE_D); // EORTB <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+
+ // IF_SVE_FQ_3A
+ theEmitter->emitIns_R_R_R(INS_sve_bdep, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_B); // BDEP <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ theEmitter->emitIns_R_R_R(INS_sve_bext, EA_SCALABLE, REG_V3, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_H); // BEXT <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ theEmitter->emitIns_R_R_R(INS_sve_bgrp, EA_SCALABLE, REG_V6, REG_V7, REG_V8,
+ INS_OPTS_SCALABLE_S); // BGRP <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ theEmitter->emitIns_R_R_R(INS_sve_bgrp, EA_SCALABLE, REG_V9, REG_V10, REG_V11,
+ INS_OPTS_SCALABLE_D); // BGRP <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+
+ // IF_SVE_FS_3A
+ theEmitter->emitIns_R_R_R(INS_sve_saddlbt, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_H); // SADDLBT <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_ssublbt, EA_SCALABLE, REG_V3, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_S); // SSUBLBT <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_ssubltb, EA_SCALABLE, REG_V6, REG_V7, REG_V8,
+ INS_OPTS_SCALABLE_D); // SSUBLTB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+
+ // IF_SVE_FW_3A
+ theEmitter->emitIns_R_R_R(INS_sve_saba, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_B); // SABA <Zda>.<T>, <Zn>.<T>, <Zm>.<T>
+ theEmitter->emitIns_R_R_R(INS_sve_saba, EA_SCALABLE, REG_V3, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_H); // SABA <Zda>.<T>, <Zn>.<T>, <Zm>.<T>
+ theEmitter->emitIns_R_R_R(INS_sve_uaba, EA_SCALABLE, REG_V6, REG_V7, REG_V8,
+ INS_OPTS_SCALABLE_S); // UABA <Zda>.<T>, <Zn>.<T>, <Zm>.<T>
+ theEmitter->emitIns_R_R_R(INS_sve_uaba, EA_SCALABLE, REG_V9, REG_V10, REG_V11,
+ INS_OPTS_SCALABLE_D); // UABA <Zda>.<T>, <Zn>.<T>, <Zm>.<T>
+
+ // IF_SVE_FX_3A
+ theEmitter->emitIns_R_R_R(INS_sve_sabalb, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_H); // SABALB <Zda>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_sabalt, EA_SCALABLE, REG_V3, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_S); // SABALT <Zda>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_uabalb, EA_SCALABLE, REG_V6, REG_V7, REG_V8,
+ INS_OPTS_SCALABLE_D); // UABALB <Zda>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_uabalt, EA_SCALABLE, REG_V9, REG_V10, REG_V11,
+ INS_OPTS_SCALABLE_H); // UABALT <Zda>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+
+#ifdef ALL_ARM64_EMITTER_UNIT_TESTS_SVE_UNSUPPORTED
+ // IF_SVE_GC_3A
+ theEmitter->emitIns_R_R_R(INS_sve_addhnb, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_B); // ADDHNB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_addhnt, EA_SCALABLE, REG_V3, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_H); // ADDHNT <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_raddhnb, EA_SCALABLE, REG_V6, REG_V7, REG_V8,
+ INS_OPTS_SCALABLE_S); // RADDHNB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_raddhnt, EA_SCALABLE, REG_V9, REG_V10, REG_V11,
+ INS_OPTS_SCALABLE_B); // RADDHNT <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_rsubhnb, EA_SCALABLE, REG_V12, REG_V13, REG_V14,
+ INS_OPTS_SCALABLE_H); // RSUBHNB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_rsubhnt, EA_SCALABLE, REG_V15, REG_V16, REG_V17,
+ INS_OPTS_SCALABLE_S); // RSUBHNT <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_subhnb, EA_SCALABLE, REG_V18, REG_V19, REG_V20,
+ INS_OPTS_SCALABLE_B); // SUBHNB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ theEmitter->emitIns_R_R_R(INS_sve_subhnt, EA_SCALABLE, REG_V21, REG_V22, REG_V23,
+ INS_OPTS_SCALABLE_H); // SUBHNT <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+#endif // ALL_ARM64_EMITTER_UNIT_TESTS_SVE_UNSUPPORTED
+
+ // IF_SVE_GF_3A
+ theEmitter->emitIns_R_R_R(INS_sve_histseg, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_B); // HISTSEG <Zd>.B, <Zn>.B, <Zm>.B
+
+ // IF_SVE_GW_3A
+ theEmitter->emitIns_R_R_R(INS_sve_fclamp, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_H); // FCLAMP <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ theEmitter->emitIns_R_R_R(INS_sve_fclamp, EA_SCALABLE, REG_V3, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_S); // FCLAMP <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ theEmitter->emitIns_R_R_R(INS_sve_fclamp, EA_SCALABLE, REG_V6, REG_V7, REG_V8,
+ INS_OPTS_SCALABLE_D); // FCLAMP <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+
+ // IF_SVE_GW_3B
+ theEmitter->emitIns_R_R_R(INS_sve_bfclamp, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_H); // BFCLAMP <Zd>.H, <Zn>.H, <Zm>.H
+
+ // IF_SVE_HK_3A
+ theEmitter->emitIns_R_R_R(INS_sve_fadd, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_UNPREDICATED); // FADD <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ theEmitter->emitIns_R_R_R(INS_sve_fmul, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_UNPREDICATED); // FMUL <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ theEmitter->emitIns_R_R_R(INS_sve_frecps, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED); // FRECPS <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ theEmitter->emitIns_R_R_R(INS_sve_frsqrts, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_UNPREDICATED); // FRSQRTS <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ theEmitter->emitIns_R_R_R(INS_sve_fsub, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_UNPREDICATED); // FSUB <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ theEmitter->emitIns_R_R_R(INS_sve_ftsmul, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED); // FTSMUL <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+
+ // IF_SVE_HK_3B
+ theEmitter->emitIns_R_R_R(INS_sve_bfadd, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_UNPREDICATED); // BFADD <Zd>.H, <Zn>.H, <Zm>.H
+ theEmitter->emitIns_R_R_R(INS_sve_bfmul, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_UNPREDICATED); // BFMUL <Zd>.H, <Zn>.H, <Zm>.H
+ theEmitter->emitIns_R_R_R(INS_sve_bfsub, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_UNPREDICATED); // BFSUB <Zd>.H, <Zn>.H, <Zm>.H
+
+#ifdef ALL_ARM64_EMITTER_UNIT_TESTS_SVE_UNSUPPORTED
+ // IF_SVE_AT_3B
+ theEmitter->emitIns_R_R_R(INS_sve_addpt, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED); // ADDPT <Zd>.D, <Zn>.D, <Zm>.D
+ theEmitter->emitIns_R_R_R(INS_sve_subpt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED); // SUBPT <Zd>.D, <Zn>.D, <Zm>.D
+#endif // ALL_ARM64_EMITTER_UNIT_TESTS_SVE_UNSUPPORTED
+
+ // IF_SVE_AU_3A
+ theEmitter->emitIns_R_R_R(INS_sve_and, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED); // AND <Zd>.D, <Zn>.D, <Zm>.D
+ theEmitter->emitIns_R_R_R(INS_sve_bic, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED); // BIC <Zd>.D, <Zn>.D, <Zm>.D
+ theEmitter->emitIns_R_R_R(INS_sve_eor, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED); // EOR <Zd>.D, <Zn>.D, <Zm>.D
+ theEmitter->emitIns_R_R_R(INS_sve_mov, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED); // MOV <Zd>.D, <Zn>.D
+ theEmitter->emitIns_R_R_R(INS_sve_orr, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED); // ORR <Zd>.D, <Zn>.D, <Zm>.D
+
+ // IF_SVE_AV_3A
+ theEmitter->emitIns_R_R_R(INS_sve_bcax, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_D); // BCAX <Zdn>.D, <Zdn>.D, <Zm>.D, <Zk>.D
+ theEmitter->emitIns_R_R_R(INS_sve_bsl, EA_SCALABLE, REG_V3, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_D); // BSL <Zdn>.D, <Zdn>.D, <Zm>.D, <Zk>.D
+ theEmitter->emitIns_R_R_R(INS_sve_bsl1n, EA_SCALABLE, REG_V6, REG_V7, REG_V8,
+ INS_OPTS_SCALABLE_D); // BSL1N <Zdn>.D, <Zdn>.D, <Zm>.D, <Zk>.D
+ theEmitter->emitIns_R_R_R(INS_sve_bsl2n, EA_SCALABLE, REG_V9, REG_V10, REG_V11,
+ INS_OPTS_SCALABLE_D); // BSL2N <Zdn>.D, <Zdn>.D, <Zm>.D, <Zk>.D
+ theEmitter->emitIns_R_R_R(INS_sve_eor3, EA_SCALABLE, REG_V12, REG_V13, REG_V14,
+ INS_OPTS_SCALABLE_D); // EOR3 <Zdn>.D, <Zdn>.D, <Zm>.D, <Zk>.D
+ theEmitter->emitIns_R_R_R(INS_sve_nbsl, EA_SCALABLE, REG_V15, REG_V16, REG_V17,
+ INS_OPTS_SCALABLE_D); // NBSL <Zdn>.D, <Zdn>.D, <Zm>.D, <Zk>.D
+
+ // IF_SVE_BB_2A
+ theEmitter->emitIns_R_R_I(INS_sve_addpl, EA_8BYTE, REG_R0, REG_R1, -32); // ADDPL <Xd|SP>, <Xn|SP>, #<imm>
+ theEmitter->emitIns_R_R_I(INS_sve_addpl, EA_8BYTE, REG_R2, REG_SP, 0); // ADDPL <Xd|SP>, <Xn|SP>, #<imm>
+ theEmitter->emitIns_R_R_I(INS_sve_addvl, EA_8BYTE, REG_R3, REG_R4, 5); // ADDVL <Xd|SP>, <Xn|SP>, #<imm>
+ theEmitter->emitIns_R_R_I(INS_sve_addvl, EA_8BYTE, REG_SP, REG_R5, 31); // ADDVL <Xd|SP>, <Xn|SP>, #<imm>
+ theEmitter->emitIns_R_R_I(INS_sve_addvl, EA_8BYTE, REG_SP, REG_SP, 0); // ADDVL <Xd|SP>, <Xn|SP>, #<imm>
+
+ // IF_SVE_BC_1A
+ theEmitter->emitIns_R_I(INS_sve_rdvl, EA_8BYTE, REG_R0, -32); // RDVL <Xd>, #<imm>
+ theEmitter->emitIns_R_I(INS_sve_rdvl, EA_8BYTE, REG_R5, 0); // RDVL <Xd>, #<imm>
+ theEmitter->emitIns_R_I(INS_sve_rdvl, EA_8BYTE, REG_R10, 5); // RDVL <Xd>, #<imm>
+ theEmitter->emitIns_R_I(INS_sve_rdvl, EA_8BYTE, REG_R15, 31); // RDVL <Xd>, #<imm>
+
// IF_SVE_BL_1A
theEmitter->emitIns_R_PATTERN_I(INS_sve_cntb, EA_8BYTE, REG_R0, SVE_PATTERN_POW2,
1); // CNTB <Xd>{, <pattern>{, MUL #<imm>}}
@@ -5547,6 +5880,14 @@ void CodeGen::genArm64EmitterUnitTestsSve()
theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V2, REG_P6, REG_V28,
INS_OPTS_SCALABLE_S); // SPLICE <Zdn>.<T>, <Pv>, <Zdn>.<T>, <Zm>.<T>
+ // IF_SVE_CW_4A
+ theEmitter->emitIns_R_R_R(INS_sve_mov, EA_SCALABLE, REG_V0, REG_P0, REG_V30, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_PREDICATE_MERGE); // MOV <Zd>.<T>, <Pv>/M, <Zn>.<T>
+ theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_V29, REG_P15, REG_V28, REG_V4, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED); // SEL <Zd>.<T>, <Pv>, <Zn>.<T>, <Zm>.<T>
+ theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_V5, REG_P13, REG_V27, REG_V5, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_UNPREDICATED); // SEL <Zd>.<T>, <Pv>, <Zn>.<T>, <Zm>.<T>
+
// IF_SVE_EQ_3A
// Note: Scalable size is the size of the destination <T>, not the source <Tb>.
theEmitter->emitIns_R_R_R(INS_sve_sadalp, EA_SCALABLE, REG_V26, REG_P3, REG_V8,
@@ -5816,8 +6157,24 @@ void CodeGen::genArm64EmitterUnitTestsSve()
theEmitter->emitIns_R(INS_sve_aesimc, EA_SCALABLE, REG_V0); // AESIMC <Zdn>.B, <Zdn>.B
theEmitter->emitIns_R(INS_sve_aesmc, EA_SCALABLE, REG_V5); // AESMC <Zdn>.B, <Zdn>.B
-// IF_SVE_GS_3A
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS_SVE_UNSUPPORTED
+ // IF_SVE_GN_3A
+ theEmitter->emitIns_R_R_R(INS_sve_fmlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_B); // FMLALB <Zda>.H, <Zn>.B, <Zm>.B
+ theEmitter->emitIns_R_R_R(INS_sve_fmlalt, EA_SCALABLE, REG_V3, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_B); // FMLALT <Zda>.H, <Zn>.B, <Zm>.B
+
+ // IF_SVE_GO_3A
+ theEmitter->emitIns_R_R_R(INS_sve_fmlallbb, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_B); // FMLALLBB <Zda>.S, <Zn>.B, <Zm>.B
+ theEmitter->emitIns_R_R_R(INS_sve_fmlallbt, EA_SCALABLE, REG_V3, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_B); // FMLALLBT <Zda>.S, <Zn>.B, <Zm>.B
+ theEmitter->emitIns_R_R_R(INS_sve_fmlalltb, EA_SCALABLE, REG_V6, REG_V7, REG_V8,
+ INS_OPTS_SCALABLE_B); // FMLALLTB <Zda>.S, <Zn>.B, <Zm>.B
+ theEmitter->emitIns_R_R_R(INS_sve_fmlalltt, EA_SCALABLE, REG_V9, REG_V10, REG_V11,
+ INS_OPTS_SCALABLE_B); // FMLALLTT <Zda>.S, <Zn>.B, <Zm>.B
+
+ // IF_SVE_GS_3A
theEmitter->emitIns_R_R_R(INS_sve_faddqv, EA_8BYTE, REG_V16, REG_P0, REG_V12,
INS_OPTS_SCALABLE_H); // FADDQV <Vd>.<T>, <Pg>, <Zn>.<Tb>
theEmitter->emitIns_R_R_R(INS_sve_fmaxnmqv, EA_8BYTE, REG_V17, REG_P1, REG_V11,
@@ -5950,6 +6307,49 @@ void CodeGen::genArm64EmitterUnitTestsSve()
theEmitter->emitIns_R_R_R_I(INS_sve_fmlslt, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 7,
INS_OPTS_SCALABLE_H); // FMLSLT <Zda>.S, <Zn>.H, <Zm>.H[<imm>]
+ // IF_SVE_HA_3A
+ theEmitter->emitIns_R_R_R(INS_sve_bfdot, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_H); // BFDOT <Zda>.S, <Zn>.H, <Zm>.H
+ theEmitter->emitIns_R_R_R(INS_sve_fdot, EA_SCALABLE, REG_V3, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_H); // FDOT <Zda>.S, <Zn>.H, <Zm>.H
+
+#ifdef ALL_ARM64_EMITTER_UNIT_TESTS_SVE_UNSUPPORTED
+ // IF_SVE_HA_3A_E
+ theEmitter->emitIns_R_R_R(INS_sve_fdot, EA_SCALABLE, REG_V6, REG_V7, REG_V8,
+ INS_OPTS_SCALABLE_B); // FDOT <Zda>.H, <Zn>.B, <Zm>.B
+
+ // IF_SVE_HA_3A_F
+ theEmitter->emitIns_R_R_R(INS_sve_fdot, EA_SCALABLE, REG_V9, REG_V10, REG_V11); // FDOT <Zda>.S, <Zn>.B, <Zm>.B
+#endif // ALL_ARM64_EMITTER_UNIT_TESTS_SVE_UNSUPPORTED
+
+ // IF_SVE_HB_3A
+ theEmitter->emitIns_R_R_R(INS_sve_bfmlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_H); // BFMLALB <Zda>.S, <Zn>.H, <Zm>.H
+ theEmitter->emitIns_R_R_R(INS_sve_bfmlalt, EA_SCALABLE, REG_V3, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_H); // BFMLALT <Zda>.S, <Zn>.H, <Zm>.H
+ theEmitter->emitIns_R_R_R(INS_sve_bfmlslb, EA_SCALABLE, REG_V6, REG_V7, REG_V8,
+ INS_OPTS_SCALABLE_H); // BFMLSLB <Zda>.S, <Zn>.H, <Zm>.H
+ theEmitter->emitIns_R_R_R(INS_sve_bfmlslt, EA_SCALABLE, REG_V9, REG_V10, REG_V11,
+ INS_OPTS_SCALABLE_H); // BFMLSLT <Zda>.S, <Zn>.H, <Zm>.H
+ theEmitter->emitIns_R_R_R(INS_sve_fmlalb, EA_SCALABLE, REG_V12, REG_V13, REG_V14,
+ INS_OPTS_SCALABLE_H); // FMLALB <Zda>.S, <Zn>.H, <Zm>.H
+ theEmitter->emitIns_R_R_R(INS_sve_fmlalt, EA_SCALABLE, REG_V15, REG_V16, REG_V17,
+ INS_OPTS_SCALABLE_H); // FMLALT <Zda>.S, <Zn>.H, <Zm>.H
+ theEmitter->emitIns_R_R_R(INS_sve_fmlslb, EA_SCALABLE, REG_V18, REG_V19, REG_V20,
+ INS_OPTS_SCALABLE_H); // FMLSLB <Zda>.S, <Zn>.H, <Zm>.H
+ theEmitter->emitIns_R_R_R(INS_sve_fmlslt, EA_SCALABLE, REG_V21, REG_V22, REG_V23,
+ INS_OPTS_SCALABLE_H); // FMLSLT <Zda>.S, <Zn>.H, <Zm>.H
+
+ // IF_SVE_HD_3A
+ theEmitter->emitIns_R_R_R(INS_sve_bfmmla, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_H); // BFMMLA <Zda>.S, <Zn>.H, <Zm>.H
+
+#ifdef ALL_ARM64_EMITTER_UNIT_TESTS_SVE_UNSUPPORTED
+ // IF_SVE_HD_3A_A
+ theEmitter->emitIns_R_R_R(INS_sve_fmmla, EA_SCALABLE, REG_V3, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_D); // FMMLA <Zda>.D, <Zn>.D, <Zm>.D
+#endif // ALL_ARM64_EMITTER_UNIT_TESTS_SVE_UNSUPPORTED
+
// IF_SVE_HE_3A
theEmitter->emitIns_R_R_R(INS_sve_faddv, EA_2BYTE, REG_V21, REG_P7, REG_V7,
INS_OPTS_SCALABLE_H); // FADDV <V><d>, <Pg>, <Zn>.<T>
@@ -6573,6 +6973,16 @@ void CodeGen::genArm64EmitterUnitTestsSve()
theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V3, 127,
INS_OPTS_SCALABLE_D); // MUL <Zdn>.<T>, <Zdn>.<T>, #<imm>
+ // IF_SVE_EF_3A
+ theEmitter->emitIns_R_R_R(INS_sve_sdot, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_H); // SDOT <Zda>.S, <Zn>.H, <Zm>.H
+ theEmitter->emitIns_R_R_R(INS_sve_udot, EA_SCALABLE, REG_V3, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_H); // UDOT <Zda>.S, <Zn>.H, <Zm>.H
+
+ // IF_SVE_EI_3A
+ theEmitter->emitIns_R_R_R(INS_sve_usdot, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_B); // USDOT <Zda>.S, <Zn>.B, <Zm>.B
+
// IF_SVE_FA_3A
theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0,
INS_OPTS_SCALABLE_B); // CDOT <Zda>.S, <Zn>.B, <Zm>.B[<imm>], <const>
@@ -7630,6 +8040,18 @@ void CodeGen::genArm64EmitterUnitTestsSve()
theEmitter->emitIns_R_R_R_R_I(INS_sve_fcmla, EA_SCALABLE, REG_V2, REG_P3, REG_V0, REG_V6, 270,
INS_OPTS_SCALABLE_D); // FCMLA <Zda>.<T>, <Pg>/M, <Zn>.<T>, <Zm>.<T>, <const>
+ // IF_SVE_GI_4A
+ theEmitter->emitIns_R_R_R_R(INS_sve_histcnt, EA_SCALABLE, REG_V0, REG_P0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_S); // HISTCNT <Zd>.<T>, <Pg>/Z, <Zn>.<T>, <Zm>.<T>
+ theEmitter->emitIns_R_R_R_R(INS_sve_histcnt, EA_SCALABLE, REG_V3, REG_P7, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_D); // HISTCNT <Zd>.<T>, <Pg>/Z, <Zn>.<T>, <Zm>.<T>
+
+ // IF_SVE_GJ_3A
+ theEmitter->emitIns_R_R_R(INS_sve_rax1, EA_SCALABLE, REG_V0, REG_V1, REG_V2,
+ INS_OPTS_SCALABLE_D); // RAX1 <Zd>.D, <Zn>.D, <Zm>.D
+ theEmitter->emitIns_R_R_R(INS_sve_sm4ekey, EA_SCALABLE, REG_V3, REG_V4, REG_V5,
+ INS_OPTS_SCALABLE_S); // SM4EKEY <Zd>.S, <Zn>.S, <Zm>.S
+
// IF_SVE_HI_3A
theEmitter->emitIns_R_R_R(INS_sve_fcmeq, EA_SCALABLE, REG_P2, REG_P3, REG_V4,
INS_OPTS_SCALABLE_H); // FCMEQ <Pd>.<T>, <Pg>/Z, <Zn>.<T>, #0.0
@@ -7747,6 +8169,78 @@ void CodeGen::genArm64EmitterUnitTestsSve()
theEmitter->emitIns_R_R_I(INS_sve_str, EA_SCALABLE, REG_V2, REG_R3, 255, INS_OPTS_NONE,
INS_SCALABLE_OPTS_UNPREDICATED);
+#ifdef ALL_ARM64_EMITTER_UNIT_TESTS_SVE_UNSUPPORTED
+ // IF_SVE_GG_3A
+ // LUTI2 <Zd>.B, {<Zn>.B }, <Zm>[<index>]
+ // luti2 z0.b, {z0.b}, z0[0] // 01000101-00100000-10110000-00000000
+ // CHECK-INST: luti2 z0.b, { z0.b }, z0[0]
+ // CHECK-ENCODING: [0x00,0xb0,0x20,0x45]
+ theEmitter->emitIns_R_R_R_I(INS_sve_luti2, EA_SCALABLE, REG_V0, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B);
+ // luti2 z21.b, {z10.b}, z21[1] // 01000101-01110101-10110001-01010101
+ // CHECK-INST: luti2 z21.b, { z10.b }, z21[1]
+ // CHECK-ENCODING: [0x55,0xb1,0x75,0x45]
+ theEmitter->emitIns_R_R_R_I(INS_sve_luti2, EA_SCALABLE, REG_V21, REG_V10, REG_V21, 1, INS_OPTS_SCALABLE_B);
+
+ // IF_SVE_GH_3B
+ // LUTI4 <Zd>.H, {<Zn1>.H, <Zn2>.H }, <Zm>[<index>]
+ // luti4 z0.h, {z0.h, z1.h}, z0[0] // 01000101-00100000-10110100-00000000
+ // CHECK-INST: luti4 z0.h, { z0.h, z1.h }, z0[0]
+ // CHECK-ENCODING: [0x00,0xb4,0x20,0x45]
+ theEmitter->emitIns_R_R_R_I(INS_sve_luti4, EA_SCALABLE, REG_V0, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H, EA_UNKNOWN,
+ INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ // luti4 z21.h, {z10.h, z11.h}, z21[1] // 01000101-01110101-10110101-01010101
+ // CHECK-INST: luti4 z21.h, { z10.h, z11.h }, z21[1]
+ // CHECK-ENCODING: [0x55,0xb5,0x75,0x45]
+ theEmitter->emitIns_R_R_R_I(INS_sve_luti4, EA_SCALABLE, REG_V21, REG_V10, REG_V21, 1, INS_OPTS_SCALABLE_H,
+ EA_UNKNOWN, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ // luti4 z31.h, {z31.h, z0.h}, z31[3] // 01000101-11111111-10110111-11111111
+ // CHECK-INST: luti4 z31.h, { z31.h, z0.h }, z31[3]
+ // CHECK-ENCODING: [0xff,0xb7,0xff,0x45]
+ theEmitter->emitIns_R_R_R_I(INS_sve_luti4, EA_SCALABLE, REG_V31, REG_V31, REG_V31, 3, INS_OPTS_SCALABLE_H,
+ EA_UNKNOWN, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+
+ // IF_SVE_GH_3B_B
+ // LUTI4 <Zd>.H, {<Zn>.H }, <Zm>[<index>]
+ // luti4 z0.h, {z0.h}, z0[0] // 01000101-00100000-10111100-00000000
+ // CHECK-INST: luti4 z0.h, { z0.h }, z0[0]
+ // CHECK-ENCODING: [0x00,0xbc,0x20,0x45]
+ theEmitter->emitIns_R_R_R_I(INS_sve_luti4, EA_SCALABLE, REG_V0, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H);
+ // luti4 z21.h, {z10.h}, z21[1] // 01000101-01110101-10111101-01010101
+ // CHECK-INST: luti4 z21.h, { z10.h }, z21[1]
+ // CHECK-ENCODING: [0x55,0xbd,0x75,0x45]
+ theEmitter->emitIns_R_R_R_I(INS_sve_luti4, EA_SCALABLE, REG_V21, REG_V10, REG_V21, 1, INS_OPTS_SCALABLE_H);
+ // luti4 z31.h, {z31.h}, z31[3] // 01000101-11111111-10111111-11111111
+ // CHECK-INST: luti4 z31.h, { z31.h }, z31[3]
+ // CHECK-ENCODING: [0xff,0xbf,0xff,0x45]
+ theEmitter->emitIns_R_R_R_I(INS_sve_luti4, EA_SCALABLE, REG_V31, REG_V31, REG_V31, 3, INS_OPTS_SCALABLE_H);
+
+ // IF_SVE_GG_3B
+ // LUTI2 <Zd>.H, {<Zn>.H }, <Zm>[<index>]
+ // luti2 z0.h, {z0.h}, z0[0] // 01000101-00100000-10101000-00000000
+ // CHECK-INST: luti2 z0.h, { z0.h }, z0[0]
+ // CHECK-ENCODING: [0x00,0xa8,0x20,0x45]
+ theEmitter->emitIns_R_R_R_I(INS_sve_luti2, EA_SCALABLE, REG_V0, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H);
+ // luti2 z21.h, {z10.h}, z21[3] // 01000101-01110101-10111001-01010101
+ // CHECK-INST: luti2 z21.h, { z10.h }, z21[3]
+ // CHECK-ENCODING: [0x55,0xb9,0x75,0x45]
+ theEmitter->emitIns_R_R_R_I(INS_sve_luti2, EA_SCALABLE, REG_V21, REG_V10, REG_V21, 3, INS_OPTS_SCALABLE_H);
+ // luti2 z31.h, {z31.h}, z31[7] // 01000101-11111111-10111011-11111111
+ // CHECK-INST: luti2 z31.h, { z31.h }, z31[7]
+ // CHECK-ENCODING: [0xff,0xbb,0xff,0x45]
+ theEmitter->emitIns_R_R_R_I(INS_sve_luti2, EA_SCALABLE, REG_V31, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_H);
+
+ // IF_SVE_GH_3A
+ // LUTI4 <Zd>.B, {<Zn>.B }, <Zm>[<index>]
+ // luti4 z0.b, {z0.b}, z0[0] // 01000101-01100000-10100100-00000000
+ // CHECK-INST: luti4 z0.b, { z0.b }, z0[0]
+ // CHECK-ENCODING: [0x00,0xa4,0x60,0x45]
+ theEmitter->emitIns_R_R_R_I(INS_sve_luti4, EA_SCALABLE, REG_V0, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B);
+ // luti4 z31.b, {z31.b}, z31[1] // 01000101-11111111-10100111-11111111
+ // CHECK-INST: luti4 z31.b, { z31.b }, z31[1]
+ // CHECK-ENCODING: [0xff,0xa7,0xff,0x45]
+ theEmitter->emitIns_R_R_R_I(INS_sve_luti4, EA_SCALABLE, REG_V31, REG_V31, REG_V31, 1, INS_OPTS_SCALABLE_B);
+#endif // ALL_ARM64_EMITTER_UNIT_TESTS_SVE_UNSUPPORTED
+
// IF_SVE_HY_3A
theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_R2, REG_V3,
INS_OPTS_SCALABLE_S_UXTW); // PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]
@@ -7983,6 +8477,319 @@ void CodeGen::genArm64EmitterUnitTestsSve()
INS_OPTS_SCALABLE_S); // LD1RB {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>}]
theEmitter->emitIns_R_R_R_I(INS_sve_ld1rb, EA_SCALABLE, REG_V1, REG_P0, REG_R9, 63,
INS_OPTS_SCALABLE_B); // LD1RB {<Zt>.D }, <Pg>/Z, [<Xn|SP>{, #<imm>}]
+
+ // IF_SVE_HF_2A
+ // FRECPE <Zd>.<T>, <Zn>.<T>
+ theEmitter->emitIns_R_R(INS_sve_frecpe, EA_SCALABLE, REG_V0, REG_V2, INS_OPTS_SCALABLE_H);
+ // FRSQRTE <Zd>.<T>, <Zn>.<T>
+ theEmitter->emitIns_R_R(INS_sve_frsqrte, EA_SCALABLE, REG_V5, REG_V3, INS_OPTS_SCALABLE_S);
+ // FRSQRTE <Zd>.<T>, <Zn>.<T>
+ theEmitter->emitIns_R_R(INS_sve_frsqrte, EA_SCALABLE, REG_V9, REG_V5, INS_OPTS_SCALABLE_D);
+
+ // IF_SVE_CH_2A
+ // SUNPKHI <Zd>.<T>, <Zn>.<Tb>
+ theEmitter->emitIns_R_R(INS_sve_sunpkhi, EA_SCALABLE, REG_V2, REG_V4, INS_OPTS_SCALABLE_H);
+ // SUNPKLO <Zd>.<T>, <Zn>.<Tb>
+ theEmitter->emitIns_R_R(INS_sve_sunpklo, EA_SCALABLE, REG_V1, REG_V5, INS_OPTS_SCALABLE_S);
+ // UUNPKHI <Zd>.<T>, <Zn>.<Tb>
+ theEmitter->emitIns_R_R(INS_sve_uunpkhi, EA_SCALABLE, REG_V5, REG_V1, INS_OPTS_SCALABLE_D);
+ // UUNPKLO <Zd>.<T>, <Zn>.<Tb>
+ theEmitter->emitIns_R_R(INS_sve_uunpklo, EA_SCALABLE, REG_V8, REG_V6, INS_OPTS_SCALABLE_S);
+
+ // IF_SVE_CG_2A
+ // REV <Zd>.<T>, <Zn>.<T>
+ theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V3, INS_OPTS_SCALABLE_B,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ // REV <Zd>.<T>, <Zn>.<T>
+ theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V4, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ // REV <Zd>.<T>, <Zn>.<T>
+ theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V7, REG_V1, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ // REV <Zd>.<T>, <Zn>.<T>
+ theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V5, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+
+ // IF_SVE_CB_2A
+ // Note: EA_4BYTE used for B and H (source register is W)
+ // DUP <Zd>.<T>, <R><n|SP>
+ theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V0, REG_R1, INS_OPTS_SCALABLE_B);
+ // DUP <Zd>.<T>, <R><n|SP>
+ theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V2, REG_R3, INS_OPTS_SCALABLE_H);
+ // DUP <Zd>.<T>, <R><n|SP>
+ theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V1, REG_R5, INS_OPTS_SCALABLE_S);
+ // DUP <Zd>.<T>, <R><n|SP>
+ theEmitter->emitIns_R_R(INS_sve_dup, EA_8BYTE, REG_V4, REG_SP, INS_OPTS_SCALABLE_D);
+ // MOV <Zd>.<T>, <R><n|SP>
+ theEmitter->emitIns_R_R(INS_sve_mov, EA_4BYTE, REG_V4, REG_R2, INS_OPTS_SCALABLE_B);
+ // MOV <Zd>.<T>, <R><n|SP>
+ theEmitter->emitIns_R_R(INS_sve_mov, EA_4BYTE, REG_V4, REG_R2, INS_OPTS_SCALABLE_H);
+ // MOV <Zd>.<T>, <R><n|SP>
+ theEmitter->emitIns_R_R(INS_sve_mov, EA_4BYTE, REG_V1, REG_R3, INS_OPTS_SCALABLE_S);
+ // MOV <Zd>.<T>, <R><n|SP>
+ theEmitter->emitIns_R_R(INS_sve_mov, EA_8BYTE, REG_V5, REG_SP, INS_OPTS_SCALABLE_D);
+ // MOV <Zd>.<T>, <R><n|SP>
+ theEmitter->emitIns_R_R(INS_sve_mov, EA_8BYTE, REG_V2, REG_R9, INS_OPTS_SCALABLE_D);
+
+ // IF_SVE_BJ_2A
+ // FEXPA <Zd>.<T>, <Zn>.<T>
+ theEmitter->emitIns_R_R(INS_sve_fexpa, EA_SCALABLE, REG_V0, REG_V1, INS_OPTS_SCALABLE_H);
+ // FEXPA <Zd>.<T>, <Zn>.<T>
+ theEmitter->emitIns_R_R(INS_sve_fexpa, EA_SCALABLE, REG_V3, REG_V0, INS_OPTS_SCALABLE_S);
+ // FEXPA <Zd>.<T>, <Zn>.<T>
+ theEmitter->emitIns_R_R(INS_sve_fexpa, EA_SCALABLE, REG_V1, REG_V0, INS_OPTS_SCALABLE_D);
+
+#ifdef ALL_ARM64_EMITTER_UNIT_TESTS_SVE_UNSUPPORTED
+ // IF_SVE_HH_2A
+ // BF1CVT <Zd>.H, <Zn>.B
+ theEmitter->emitIns_R_R(INS_sve_bf1cvt, EA_SCALABLE, REG_V2, REG_V3, INS_OPTS_SCALABLE_H);
+ // BF1CVTLT <Zd>.H, <Zn>.B
+ theEmitter->emitIns_R_R(INS_sve_bf1cvtlt, EA_SCALABLE, REG_V1, REG_V5, INS_OPTS_SCALABLE_H);
+ // BF2CVT <Zd>.H, <Zn>.B
+ theEmitter->emitIns_R_R(INS_sve_bf2cvt, EA_SCALABLE, REG_V6, REG_V2, INS_OPTS_SCALABLE_H);
+ // BF2CVTLT <Zd>.H, <Zn>.B
+ theEmitter->emitIns_R_R(INS_sve_bf2cvtlt, EA_SCALABLE, REG_V3, REG_V1, INS_OPTS_SCALABLE_H);
+ // F1CVT <Zd>.H, <Zn>.B
+ theEmitter->emitIns_R_R(INS_sve_f1cvt, EA_SCALABLE, REG_V6, REG_V7, INS_OPTS_SCALABLE_H);
+ // F1CVTLT <Zd>.H, <Zn>.B
+ theEmitter->emitIns_R_R(INS_sve_f1cvtlt, EA_SCALABLE, REG_V1, REG_V8, INS_OPTS_SCALABLE_H);
+ // F2CVT <Zd>.H, <Zn>.B
+ theEmitter->emitIns_R_R(INS_sve_f2cvt, EA_SCALABLE, REG_V3, REG_V4, INS_OPTS_SCALABLE_H);
+ // F2CVTLT <Zd>.H, <Zn>.B
+ theEmitter->emitIns_R_R(INS_sve_f2cvtlt, EA_SCALABLE, REG_V1, REG_V2, INS_OPTS_SCALABLE_H);
+#endif // ALL_ARM64_EMITTER_UNIT_TESTS_SVE_UNSUPPORTED
+
+ // IF_SVE_BI_2A
+ // MOVPRFX <Zd>, <Zn>
+ theEmitter->emitIns_R_R(INS_sve_movprfx, EA_SCALABLE, REG_V3, REG_V5);
+
+ // IF_SVE_BF_2A
+ // ASR <Zd>.<T>, <Zn>.<T>, #<const>
+ theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+    // LSL <Zd>.<T>, <Zn>.<T>, #<const>
+ theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 31, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 63, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+    // LSR <Zd>.<T>, <Zn>.<T>, #<const>
+ theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+ theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D,
+ INS_SCALABLE_OPTS_UNPREDICATED);
+
+ // IF_SVE_FT_2A
+ // SLI <Zd>.<T>, <Zn>.<T>, #<const>
+ theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 31, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D);
+ theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 63, INS_OPTS_SCALABLE_D);
+ theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+ theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+ // SRI <Zd>.<T>, <Zn>.<T>, #<const>
+ theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D);
+ theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D);
+ theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+ theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+
+ // IF_SVE_FU_2A
+ // SRSRA <Zda>.<T>, <Zn>.<T>, #<const>
+ theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D);
+ theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D);
+ theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+ theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+ // SSRA <Zda>.<T>, <Zn>.<T>, #<const>
+ theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D);
+ theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D);
+ theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+ theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+ // URSRA <Zda>.<T>, <Zn>.<T>, #<const>
+ theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D);
+ theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D);
+ theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+ theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+ // USRA <Zda>.<T>, <Zn>.<T>, #<const>
+ theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D);
+ theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D);
+ theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+ theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+
+ // IF_SVE_BX_2A
+ // DUPQ <Zd>.<T>, <Zn>.<T>[<imm>]
+ theEmitter->emitIns_R_R_I(INS_sve_dupq, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_dupq, EA_SCALABLE, REG_V21, REG_V10, 10, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_dupq, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_dupq, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_dupq, EA_SCALABLE, REG_V21, REG_V10, 5, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_dupq, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ theEmitter->emitIns_R_R_I(INS_sve_dupq, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_dupq, EA_SCALABLE, REG_V21, REG_V10, 2, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_dupq, EA_SCALABLE, REG_V31, REG_V31, 3, INS_OPTS_SCALABLE_S);
+ theEmitter->emitIns_R_R_I(INS_sve_dupq, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D);
+ theEmitter->emitIns_R_R_I(INS_sve_dupq, EA_SCALABLE, REG_V31, REG_V31, 1, INS_OPTS_SCALABLE_D);
+
+ // IF_SVE_BY_2A
+ // EXTQ <Zdn>.B, <Zdn>.B, <Zm>.B, #<imm>
+ theEmitter->emitIns_R_R_I(INS_sve_extq, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B);
+ theEmitter->emitIns_R_R_I(INS_sve_extq, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_B);
}
#endif // defined(TARGET_ARM64) && defined(DEBUG)
diff --git a/src/coreclr/jit/codegenarmarch.cpp b/src/coreclr/jit/codegenarmarch.cpp
index 8eb8d71b617a..1535ed68d9ee 100644
--- a/src/coreclr/jit/codegenarmarch.cpp
+++ b/src/coreclr/jit/codegenarmarch.cpp
@@ -441,6 +441,12 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode)
break;
#endif // TARGET_ARM64
+#ifdef SWIFT_SUPPORT
+ case GT_SWIFT_ERROR:
+ genCodeForSwiftErrorReg(treeNode);
+ break;
+#endif // SWIFT_SUPPORT
+
case GT_RELOAD:
// do nothing - reload is just a marker.
// The parent node will call genConsumeReg on this which will trigger the unspill of this node's child
@@ -507,7 +513,6 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode)
#endif
break;
- case GT_STORE_DYN_BLK:
case GT_STORE_BLK:
genCodeForStoreBlk(treeNode->AsBlk());
break;
@@ -3369,6 +3374,17 @@ void CodeGen::genCall(GenTreeCall* call)
genDefineTempLabel(genCreateTempLabel());
}
+#ifdef SWIFT_SUPPORT
+ // Clear the Swift error register before calling a Swift method,
+ // so we can check if it set the error register after returning.
+ // (Flag is only set if we know we need to check the error register)
+ if ((call->gtCallMoreFlags & GTF_CALL_M_SWIFT_ERROR_HANDLING) != 0)
+ {
+ assert(call->unmgdCallConv == CorInfoCallConvExtension::Swift);
+ instGen_Set_Reg_To_Zero(EA_PTRSIZE, REG_SWIFT_ERROR);
+ }
+#endif // SWIFT_SUPPORT
+
genCallInstruction(call);
genDefinePendingCallLabel(call);
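The clear emitted above pairs with a read of the same register after the call returns (see genCodeForSwiftErrorReg later in this diff). A rough sketch of the intended arm64 sequence, assuming x21 is REG_SWIFT_ERROR (the register choice here is illustrative):

    mov  x21, xzr      ; zero the Swift error register before the call
    bl   SwiftCallee   ; a throwing Swift callee reports an error in x21
    ; a following GT_SWIFT_ERROR node materializes x21 as its value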
@@ -4551,14 +4567,14 @@ void CodeGen::inst_JMP(emitJumpKind jmp, BasicBlock* tgtBlock)
}
//------------------------------------------------------------------------
-// genCodeForStoreBlk: Produce code for a GT_STORE_DYN_BLK/GT_STORE_BLK node.
+// genCodeForStoreBlk: Produce code for a GT_STORE_BLK node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForStoreBlk(GenTreeBlk* blkOp)
{
- assert(blkOp->OperIs(GT_STORE_DYN_BLK, GT_STORE_BLK));
+ assert(blkOp->OperIs(GT_STORE_BLK));
bool isCopyBlk = blkOp->OperIsCopyBlkOp();
diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp
index d965d5c91f42..bf2bfaa28aad 100644
--- a/src/coreclr/jit/codegencommon.cpp
+++ b/src/coreclr/jit/codegencommon.cpp
@@ -6452,6 +6452,47 @@ void CodeGen::genFnProlog()
#pragma warning(pop)
#endif
+//----------------------------------------------------------------------------------
+// genEmitJumpTable: emit jump table and return its base offset
+//
+// Arguments:
+// treeNode - the GT_JMPTABLE node
+// relativeAddr - if true, references are treated as 4-byte relative addresses,
+// otherwise they are absolute pointers
+//
+// Return Value:
+// base offset to jump table
+//
+// Assumption:
+//    The current basic block being processed ends with a switch statement
+//
+unsigned CodeGen::genEmitJumpTable(GenTree* treeNode, bool relativeAddr)
+{
+ noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH));
+ assert(treeNode->OperGet() == GT_JMPTABLE);
+
+ emitter* emit = GetEmitter();
+ const unsigned jumpCount = compiler->compCurBB->GetSwitchTargets()->bbsCount;
+ FlowEdge** jumpTable = compiler->compCurBB->GetSwitchTargets()->bbsDstTab;
+ const unsigned jmpTabBase = emit->emitBBTableDataGenBeg(jumpCount, relativeAddr);
+
+ JITDUMP("\n J_M%03u_DS%02u LABEL DWORD\n", compiler->compMethodID, jmpTabBase);
+
+ for (unsigned i = 0; i < jumpCount; i++)
+ {
+ BasicBlock* target = (*jumpTable)->getDestinationBlock();
+ jumpTable++;
+ noway_assert(target->HasFlag(BBF_HAS_LABEL));
+
+ JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum);
+
+ emit->emitDataGenData(i, target);
+    }
+
+ emit->emitDataGenEnd();
+ return jmpTabBase;
+}
+
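With relativeAddr=true the entries are emitted as 4-byte offsets rather than absolute pointers. Given the JITDUMP format strings above, the dump for a hypothetical method 12 with a three-way switch would look roughly like this (method and block numbers made up; FMT_BB expands to a BB-number label):

          J_M012_DS00 LABEL   DWORD
          DD      L_M012_BB05
          DD      L_M012_BB08
          DD      L_M012_BB11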
//------------------------------------------------------------------------
// getCallTarget - Get the node that evaluates to the call target
//
@@ -6546,7 +6587,7 @@ void CodeGen::genDefinePendingCallLabel(GenTreeCall* call)
// For certain indirect calls we may introduce helper calls before that we need to skip:
// - CFG may introduce a call to the validator first
// - Generic virtual methods may compute the target dynamically through a separate helper call
- // - memset/memcpy helper calls emitted for GT_STORE_DYN_BLK/GT_STORE_BLK
+ // - memset/memcpy helper calls emitted for GT_STORE_BLK
if (call->IsHelperCall())
{
switch (compiler->eeGetHelperNum(call->gtCallMethHnd))
@@ -8403,7 +8444,9 @@ void CodeGen::genPoisonFrame(regMaskTP regLiveIn)
GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_ARG_0, (int)varNum, 0);
instGen_Set_Reg_To_Imm(EA_4BYTE, REG_ARG_1, static_cast<char>(poisonVal));
instGen_Set_Reg_To_Imm(EA_PTRSIZE, REG_ARG_2, size);
- genEmitHelperCall(CORINFO_HELP_MEMSET, 0, EA_UNKNOWN);
+
+    // Call the non-managed memset helper
+ genEmitHelperCall(CORINFO_HELP_NATIVE_MEMSET, 0, EA_UNKNOWN);
// May kill REG_SCRATCH, so we need to reload it.
hasPoisonImm = false;
#endif
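The three argument registers loaded just above line up with the usual C memset signature that the native helper ultimately takes the shape of (a sketch; the exact helper wiring lives on the VM side):

    void* memset(void* dst, int val, size_t n);   // REG_ARG_0 = dst, REG_ARG_1 = val, REG_ARG_2 = n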
@@ -8567,3 +8610,31 @@ void CodeGen::genCodeForReuseVal(GenTree* treeNode)
genDefineTempLabel(genCreateTempLabel());
}
}
+
+#ifdef SWIFT_SUPPORT
+//---------------------------------------------------------------------
+// genCodeForSwiftErrorReg - generate code for a GT_SWIFT_ERROR node
+//
+// Arguments:
+// tree - the GT_SWIFT_ERROR node
+//
+// Return value:
+// None
+//
+void CodeGen::genCodeForSwiftErrorReg(GenTree* tree)
+{
+ assert(tree->OperIs(GT_SWIFT_ERROR));
+
+ var_types targetType = tree->TypeGet();
+ regNumber targetReg = tree->GetRegNum();
+
+ // LSRA should have picked REG_SWIFT_ERROR as the destination register, too
+ // (see LinearScan::BuildNode for an explanation of why we want this)
+ assert(targetReg == REG_SWIFT_ERROR);
+
+ inst_Mov(targetType, targetReg, REG_SWIFT_ERROR, /* canSkip */ true);
+ genTransferRegGCState(targetReg, REG_SWIFT_ERROR);
+
+ genProduceReg(tree);
+}
+#endif // SWIFT_SUPPORT
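A sketch of the IR shape this code handles, with hypothetical temp and local names: LSRA pins the GT_SWIFT_ERROR def to REG_SWIFT_ERROR, so the canSkip inst_Mov above normally emits nothing:

    CALL      unmanaged<Swift> ...
    t1      = SWIFT_ERROR            ; value of REG_SWIFT_ERROR after the call
    STORE_LCL_VAR V05, t1            ; V05 illustrative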
diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp
index 913f3a47002a..78df10811a4c 100644
--- a/src/coreclr/jit/codegenlinear.cpp
+++ b/src/coreclr/jit/codegenlinear.cpp
@@ -1935,18 +1935,9 @@ void CodeGen::genSetBlockSize(GenTreeBlk* blkNode, regNumber sizeReg)
{
if (sizeReg != REG_NA)
{
- unsigned blockSize = blkNode->Size();
- if (!blkNode->OperIs(GT_STORE_DYN_BLK))
- {
- assert((blkNode->gtRsvdRegs & genRegMask(sizeReg)) != 0);
- // This can go via helper which takes the size as a native uint.
- instGen_Set_Reg_To_Imm(EA_PTRSIZE, sizeReg, blockSize);
- }
- else
- {
- GenTree* sizeNode = blkNode->AsStoreDynBlk()->gtDynamicSize;
- inst_Mov(sizeNode->TypeGet(), sizeReg, sizeNode->GetRegNum(), /* canSkip */ true);
- }
+ assert((blkNode->gtRsvdRegs & genRegMask(sizeReg)) != 0);
+    // This can go via a helper which takes the size as a native uint.
+ instGen_Set_Reg_To_Imm(EA_PTRSIZE, sizeReg, blkNode->Size());
}
}
@@ -2052,12 +2043,6 @@ void CodeGen::genConsumeBlockOp(GenTreeBlk* blkNode, regNumber dstReg, regNumber
genConsumeReg(dstAddr);
// The source may be a local or in a register; 'genConsumeBlockSrc' will check that.
genConsumeBlockSrc(blkNode);
- // 'genSetBlockSize' (called below) will ensure that a register has been reserved as needed
- // in the case where the size is a constant (i.e. it is not GT_STORE_DYN_BLK).
- if (blkNode->OperGet() == GT_STORE_DYN_BLK)
- {
- genConsumeReg(blkNode->AsStoreDynBlk()->gtDynamicSize);
- }
// Next, perform any necessary moves.
genCopyRegIfNeeded(dstAddr, dstReg);
diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp
index f28778595196..01e82be425a3 100644
--- a/src/coreclr/jit/codegenloongarch64.cpp
+++ b/src/coreclr/jit/codegenloongarch64.cpp
@@ -2927,33 +2927,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode)
// emits the table and an instruction to get the address of the first element
void CodeGen::genJumpTable(GenTree* treeNode)
{
- noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH));
- assert(treeNode->OperGet() == GT_JMPTABLE);
-
- unsigned jumpCount = compiler->compCurBB->GetSwitchTargets()->bbsCount;
- FlowEdge** jumpTable = compiler->compCurBB->GetSwitchTargets()->bbsDstTab;
- unsigned jmpTabOffs;
- unsigned jmpTabBase;
-
- jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, true);
-
- jmpTabOffs = 0;
-
- JITDUMP("\n J_M%03u_DS%02u LABEL DWORD\n", compiler->compMethodID, jmpTabBase);
-
- for (unsigned i = 0; i < jumpCount; i++)
- {
- BasicBlock* target = (*jumpTable)->getDestinationBlock();
- jumpTable++;
- noway_assert(target->HasFlag(BBF_HAS_LABEL));
-
- JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum);
-
- GetEmitter()->emitDataGenData(i, target);
- };
-
- GetEmitter()->emitDataGenEnd();
-
+ unsigned jmpTabBase = genEmitJumpTable(treeNode, true);
// Access to inline data is 'abstracted' by a special type of static member
// (produced by eeFindJitDataOffs) which the emitter recognizes as being a reference
// to constant data, not a real static field.
@@ -5022,7 +4996,6 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode)
emit->emitIns_R_L(INS_ld_d, EA_PTRSIZE, genPendingCallLabel, targetReg);
break;
- case GT_STORE_DYN_BLK:
case GT_STORE_BLK:
genCodeForStoreBlk(treeNode->AsBlk());
break;
@@ -7249,14 +7222,14 @@ void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize,
}
//------------------------------------------------------------------------
-// genCodeForStoreBlk: Produce code for a GT_STORE_DYN_BLK/GT_STORE_BLK node.
+// genCodeForStoreBlk: Produce code for a GT_STORE_BLK node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForStoreBlk(GenTreeBlk* blkOp)
{
- assert(blkOp->OperIs(GT_STORE_DYN_BLK, GT_STORE_BLK));
+ assert(blkOp->OperIs(GT_STORE_BLK));
if (blkOp->gtBlkOpGcUnsafe)
{
diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp
index 6dc76478246b..c66511fa87b8 100644
--- a/src/coreclr/jit/codegenriscv64.cpp
+++ b/src/coreclr/jit/codegenriscv64.cpp
@@ -2850,32 +2850,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode)
// emits the table and an instruction to get the address of the first element
void CodeGen::genJumpTable(GenTree* treeNode)
{
- noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH));
- assert(treeNode->OperGet() == GT_JMPTABLE);
-
- unsigned jumpCount = compiler->compCurBB->GetSwitchTargets()->bbsCount;
- FlowEdge** jumpTable = compiler->compCurBB->GetSwitchTargets()->bbsDstTab;
- unsigned jmpTabOffs;
- unsigned jmpTabBase;
-
- jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, true);
-
- jmpTabOffs = 0;
-
- JITDUMP("\n J_M%03u_DS%02u LABEL DWORD\n", compiler->compMethodID, jmpTabBase);
-
- for (unsigned i = 0; i < jumpCount; i++)
- {
- BasicBlock* target = (*jumpTable)->getDestinationBlock();
- noway_assert(target->HasFlag(BBF_HAS_LABEL));
-
- JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum);
-
- GetEmitter()->emitDataGenData(i, target);
- };
-
- GetEmitter()->emitDataGenEnd();
-
+ unsigned jmpTabBase = genEmitJumpTable(treeNode, true);
// Access to inline data is 'abstracted' by a special type of static member
// (produced by eeFindJitDataOffs) which the emitter recognizes as being a reference
// to constant data, not a real static field.
@@ -5102,7 +5077,6 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode)
emit->emitIns_R_L(INS_ld, EA_PTRSIZE, genPendingCallLabel, targetReg);
break;
- case GT_STORE_DYN_BLK:
case GT_STORE_BLK:
genCodeForStoreBlk(treeNode->AsBlk());
break;
@@ -7246,14 +7220,14 @@ void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize,
}
//------------------------------------------------------------------------
-// genCodeForStoreBlk: Produce code for a GT_STORE_DYN_BLK/GT_STORE_BLK node.
+// genCodeForStoreBlk: Produce code for a GT_STORE_BLK node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForStoreBlk(GenTreeBlk* blkOp)
{
- assert(blkOp->OperIs(GT_STORE_DYN_BLK, GT_STORE_BLK));
+ assert(blkOp->OperIs(GT_STORE_BLK));
if (blkOp->gtBlkOpGcUnsafe)
{
diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp
index 884ba901e5d8..223199f35c32 100644
--- a/src/coreclr/jit/codegenxarch.cpp
+++ b/src/coreclr/jit/codegenxarch.cpp
@@ -332,7 +332,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
#endif // !FEATURE_EH_FUNCLETS
// The BBJ_CALLFINALLYRET is used because the BBJ_CALLFINALLY can't point to the
- // jump target using bbTarget - that is already used to point
+ // jump target using bbTargetEdge - that is already used to point
// to the finally block. So just skip past the BBJ_CALLFINALLYRET unless the
// block is RETLESS.
if (!block->HasFlag(BBF_RETLESS_CALL))
@@ -2107,6 +2107,12 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode)
case GT_NOP:
break;
+#ifdef SWIFT_SUPPORT
+ case GT_SWIFT_ERROR:
+ genCodeForSwiftErrorReg(treeNode);
+ break;
+#endif // SWIFT_SUPPORT
+
case GT_KEEPALIVE:
genConsumeRegs(treeNode->AsOp()->gtOp1);
break;
@@ -2177,7 +2183,6 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode)
emit->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, genPendingCallLabel, treeNode->GetRegNum());
break;
- case GT_STORE_DYN_BLK:
case GT_STORE_BLK:
genCodeForStoreBlk(treeNode->AsBlk());
break;
@@ -3051,7 +3056,7 @@ ALLOC_DONE:
void CodeGen::genCodeForStoreBlk(GenTreeBlk* storeBlkNode)
{
- assert(storeBlkNode->OperIs(GT_STORE_DYN_BLK, GT_STORE_BLK));
+ assert(storeBlkNode->OperIs(GT_STORE_BLK));
bool isCopyBlk = storeBlkNode->OperIsCopyBlkOp();
@@ -4298,33 +4303,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode)
// emits the table and an instruction to get the address of the first element
void CodeGen::genJumpTable(GenTree* treeNode)
{
- noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH));
- assert(treeNode->OperGet() == GT_JMPTABLE);
-
- unsigned jumpCount = compiler->compCurBB->GetSwitchTargets()->bbsCount;
- FlowEdge** jumpTable = compiler->compCurBB->GetSwitchTargets()->bbsDstTab;
- unsigned jmpTabOffs;
- unsigned jmpTabBase;
-
- jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, true);
-
- jmpTabOffs = 0;
-
- JITDUMP("\n J_M%03u_DS%02u LABEL DWORD\n", compiler->compMethodID, jmpTabBase);
-
- for (unsigned i = 0; i < jumpCount; i++)
- {
- BasicBlock* target = (*jumpTable)->getDestinationBlock();
- jumpTable++;
- noway_assert(target->HasFlag(BBF_HAS_LABEL));
-
- JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum);
-
- GetEmitter()->emitDataGenData(i, target);
- };
-
- GetEmitter()->emitDataGenEnd();
-
+ unsigned jmpTabBase = genEmitJumpTable(treeNode, true);
// Access to inline data is 'abstracted' by a special type of static member
// (produced by eeFindJitDataOffs) which the emitter recognizes as being a reference
// to constant data, not a real static field.
@@ -6035,6 +6014,17 @@ void CodeGen::genCall(GenTreeCall* call)
instGen(INS_vzeroupper);
}
+#ifdef SWIFT_SUPPORT
+ // Clear the Swift error register before calling a Swift method,
+ // so we can check if it set the error register after returning.
+ // (Flag is only set if we know we need to check the error register)
+ if ((call->gtCallMoreFlags & GTF_CALL_M_SWIFT_ERROR_HANDLING) != 0)
+ {
+ assert(call->unmgdCallConv == CorInfoCallConvExtension::Swift);
+ instGen_Set_Reg_To_Zero(EA_PTRSIZE, REG_SWIFT_ERROR);
+ }
+#endif // SWIFT_SUPPORT
+
genCallInstruction(call X86_ARG(stackArgBytes));
genDefinePendingCallLabel(call);
@@ -10789,9 +10779,12 @@ void CodeGen::genFuncletProlog(BasicBlock* block)
compiler->unwindEndProlog();
// TODO We may need EBP restore sequence here if we introduce PSPSym
+ CLANG_FORMAT_COMMENT_ANCHOR;
+#ifdef UNIX_X86_ABI
// Add a padding for 16-byte alignment
inst_RV_IV(INS_sub, REG_SPBASE, 12, EA_PTRSIZE);
+#endif
}
/*****************************************************************************
@@ -10810,8 +10803,10 @@ void CodeGen::genFuncletEpilog()
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
+#ifdef UNIX_X86_ABI
// Revert a padding that was added for 16-byte alignment
inst_RV_IV(INS_add, REG_SPBASE, 12, EA_PTRSIZE);
+#endif
instGen_Return(0);
}
diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp
index 9fded7a13ccb..2bba7db9ae7a 100644
--- a/src/coreclr/jit/compiler.cpp
+++ b/src/coreclr/jit/compiler.cpp
@@ -4893,6 +4893,7 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl
bool doValueNum = true;
bool doLoopHoisting = true;
bool doCopyProp = true;
+ bool doOptimizeIVs = true;
bool doBranchOpt = true;
bool doCse = true;
bool doAssertionProp = true;
@@ -4905,6 +4906,7 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl
doSsa = (JitConfig.JitDoSsa() != 0);
doEarlyProp = doSsa && (JitConfig.JitDoEarlyProp() != 0);
doValueNum = doSsa && (JitConfig.JitDoValueNumber() != 0);
+ doOptimizeIVs = doSsa && (JitConfig.JitDoOptimizeIVs() != 0);
doLoopHoisting = doValueNum && (JitConfig.JitDoLoopHoisting() != 0);
doCopyProp = doValueNum && (JitConfig.JitDoCopyProp() != 0);
doBranchOpt = doValueNum && (JitConfig.JitDoRedundantBranchOpts() != 0);
@@ -5005,6 +5007,13 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl
DoPhase(this, PHASE_OPTIMIZE_INDEX_CHECKS, &Compiler::rangeCheckPhase);
}
+ if (doOptimizeIVs)
+ {
+ // Simplify and optimize induction variables used in natural loops
+ //
+ DoPhase(this, PHASE_OPTIMIZE_INDUCTION_VARIABLES, &Compiler::optInductionVariables);
+ }
+
if (doVNBasedDeadStoreRemoval)
{
// Note: this invalidates SSA and value numbers on tree nodes.
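Illustrative of the transformation family this phase targets (not code from this patch): an expression recomputed from the loop counter on each iteration can be replaced by a secondary induction variable that is simply incremented, e.g. for a byte pointer p:

    // before: the index is rescaled on every iteration
    for (int i = 0; i < n; i++)        sum += p[i * 4];
    // after: a derived IV advances by the scale directly
    for (int j = 0; j < n * 4; j += 4) sum += p[j];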
@@ -9409,6 +9418,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#pragma comment(linker, "/include:cLoops")
#pragma comment(linker, "/include:cLoopsA")
#pragma comment(linker, "/include:cLoop")
+#pragma comment(linker, "/include:cScev")
#pragma comment(linker, "/include:cTreeFlags")
#pragma comment(linker, "/include:cVN")
@@ -9434,6 +9444,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#pragma comment(linker, "/include:dCVarSet")
#pragma comment(linker, "/include:dLoop")
#pragma comment(linker, "/include:dLoops")
+#pragma comment(linker, "/include:dScev")
#pragma comment(linker, "/include:dTreeFlags")
#pragma comment(linker, "/include:dVN")
@@ -9677,24 +9688,39 @@ JITDBGAPI void __cdecl cCVarSet(Compiler* comp, VARSET_VALARG_TP vars)
JITDBGAPI void __cdecl cLoops(Compiler* comp)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
- printf("===================================================================== *NewLoops %u\n", sequenceNumber++);
+ printf("===================================================================== *Loops %u\n", sequenceNumber++);
FlowGraphNaturalLoops::Dump(comp->m_loops);
}
JITDBGAPI void __cdecl cLoopsA(Compiler* comp, FlowGraphNaturalLoops* loops)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
- printf("===================================================================== *NewLoopsA %u\n", sequenceNumber++);
+ printf("===================================================================== *LoopsA %u\n", sequenceNumber++);
FlowGraphNaturalLoops::Dump(loops);
}
JITDBGAPI void __cdecl cLoop(Compiler* comp, FlowGraphNaturalLoop* loop)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
- printf("===================================================================== *NewLoop %u\n", sequenceNumber++);
+ printf("===================================================================== *Loop %u\n", sequenceNumber++);
FlowGraphNaturalLoop::Dump(loop);
}
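+// Debugger-callable helper: dumps a scalar evolution (Scev) node, tolerating
+// nullptr so it can be invoked on values that have no analysis result.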
+JITDBGAPI void __cdecl cScev(Compiler* comp, Scev* scev)
+{
+ static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
+ printf("===================================================================== *Scev %u\n", sequenceNumber++);
+ if (scev == nullptr)
+ {
+ printf(" NULL\n");
+ }
+ else
+ {
+ scev->Dump(comp);
+ printf("\n");
+ }
+}
+
JITDBGAPI void __cdecl cTreeFlags(Compiler* comp, GenTree* tree)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
@@ -9920,7 +9946,6 @@ JITDBGAPI void __cdecl cTreeFlags(Compiler* comp, GenTree* tree)
case GT_BLK:
case GT_STORE_BLK:
- case GT_STORE_DYN_BLK:
if (tree->gtFlags & GTF_IND_VOLATILE)
{
@@ -10285,6 +10310,11 @@ JITDBGAPI void __cdecl dLoop(FlowGraphNaturalLoop* loop)
cLoop(JitTls::GetCompiler(), loop);
}
+JITDBGAPI void __cdecl dScev(Scev* scev)
+{
+ cScev(JitTls::GetCompiler(), scev);
+}
+
JITDBGAPI void __cdecl dTreeFlags(GenTree* tree)
{
cTreeFlags(JitTls::GetCompiler(), tree);
diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index c8d7fe49b62a..b69ccd18f964 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -42,6 +42,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "jitexpandarray.h"
#include "tinyarray.h"
#include "valuenum.h"
+#include "scev.h"
#include "namedintrinsiclist.h"
#ifdef LATE_DISASM
#include "disasm.h"
@@ -1583,9 +1584,10 @@ enum class ProfileChecks : unsigned int
CHECK_NONE = 0,
CHECK_CLASSIC = 1 << 0, // check "classic" jit weights
CHECK_HASLIKELIHOOD = 1 << 1, // check all FlowEdges for hasLikelihood
- CHECK_LIKELY = 1 << 2, // fully check likelihood based weights
- RAISE_ASSERT = 1 << 3, // assert on check failure
- CHECK_ALL_BLOCKS = 1 << 4, // check blocks even if bbHasProfileWeight is false
+ CHECK_LIKELIHOODSUM = 1 << 2, // check block successor likelihoods sum to 1
+ CHECK_LIKELY = 1 << 3, // fully check likelihood based weights
+ RAISE_ASSERT = 1 << 4, // assert on check failure
+ CHECK_ALL_BLOCKS = 1 << 5, // check blocks even if bbHasProfileWeight is false
};
inline constexpr ProfileChecks operator ~(ProfileChecks a)
@@ -3364,9 +3366,6 @@ public:
GenTreeBlk* gtNewStoreBlkNode(
ClassLayout* layout, GenTree* addr, GenTree* data, GenTreeFlags indirFlags = GTF_EMPTY);
- GenTreeStoreDynBlk* gtNewStoreDynBlkNode(
- GenTree* addr, GenTree* data, GenTree* dynamicSize, GenTreeFlags indirFlags = GTF_EMPTY);
-
GenTreeStoreInd* gtNewStoreIndNode(
var_types type, GenTree* addr, GenTree* data, GenTreeFlags indirFlags = GTF_EMPTY);
@@ -4368,7 +4367,11 @@ protected:
void impCheckForPInvokeCall(
GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block);
GenTreeCall* impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di = DebugInfo());
- void impPopArgsForUnmanagedCall(GenTreeCall* call, CORINFO_SIG_INFO* sig);
+ void impPopArgsForUnmanagedCall(GenTreeCall* call, CORINFO_SIG_INFO* sig, /* OUT */ CallArg** swiftErrorArg);
+
+#ifdef SWIFT_SUPPORT
+ void impAppendSwiftErrorStore(GenTreeCall* call, CallArg* const swiftErrorArg);
+#endif // SWIFT_SUPPORT
void impInsertHelperCall(CORINFO_HELPER_DESC* helperCall);
void impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall);
@@ -4973,7 +4976,7 @@ public:
#ifdef DEBUG
jitstd::vector<BasicBlock*>* fgBBOrder; // ordered vector of BBs
#endif
- // Used as a quick check for whether loop alignment should look for natural loops.
+ // Used as a quick check for whether phases downstream of loop finding should look for natural loops.
// If true: there may or may not be any natural loops in the flow graph, so try to find them
// If false: there's definitely not any natural loops in the flow graph
bool fgMightHaveNaturalLoops;
@@ -5079,34 +5082,31 @@ public:
void fgExtendEHRegionBefore(BasicBlock* block);
void fgExtendEHRegionAfter(BasicBlock* block);
- BasicBlock* fgNewBBbefore(BBKinds jumpKind, BasicBlock* block, bool extendRegion, BasicBlock* jumpDest = nullptr);
+ BasicBlock* fgNewBBbefore(BBKinds jumpKind, BasicBlock* block, bool extendRegion);
- BasicBlock* fgNewBBafter(BBKinds jumpKind, BasicBlock* block, bool extendRegion, BasicBlock* jumpDest = nullptr);
+ BasicBlock* fgNewBBafter(BBKinds jumpKind, BasicBlock* block, bool extendRegion);
- BasicBlock* fgNewBBFromTreeAfter(BBKinds jumpKind, BasicBlock* block, GenTree* tree, DebugInfo& debugInfo, BasicBlock* jumpDest = nullptr, bool updateSideEffects = false);
+ BasicBlock* fgNewBBFromTreeAfter(BBKinds jumpKind, BasicBlock* block, GenTree* tree, DebugInfo& debugInfo, bool updateSideEffects = false);
BasicBlock* fgNewBBinRegion(BBKinds jumpKind,
unsigned tryIndex,
unsigned hndIndex,
BasicBlock* nearBlk,
- BasicBlock* jumpDest = nullptr,
bool putInFilter = false,
bool runRarely = false,
bool insertAtEnd = false);
BasicBlock* fgNewBBinRegion(BBKinds jumpKind,
BasicBlock* srcBlk,
- BasicBlock* jumpDest = nullptr,
bool runRarely = false,
bool insertAtEnd = false);
- BasicBlock* fgNewBBinRegion(BBKinds jumpKind, BasicBlock* jumpDest = nullptr);
+ BasicBlock* fgNewBBinRegion(BBKinds jumpKind);
BasicBlock* fgNewBBinRegionWorker(BBKinds jumpKind,
BasicBlock* afterBlk,
unsigned xcptnIndex,
- bool putInTryRegion,
- BasicBlock* jumpDest = nullptr);
+ bool putInTryRegion);
void fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk);
void fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk);
@@ -5827,15 +5827,15 @@ public:
public:
// For many purposes, it is desirable to be able to enumerate the *distinct* targets of a switch statement,
// skipping duplicate targets. (E.g., in flow analyses that are only interested in the set of possible targets.)
- // SwitchUniqueSuccSet contains the non-duplicated switch targets.
+ // SwitchUniqueSuccSet contains the non-duplicated switch successor edges.
// Code that modifies the flowgraph (such as by renumbering blocks) must call Compiler::InvalidateUniqueSwitchSuccMap,
// and code that modifies the targets of a switch block must call Compiler::fgInvalidateSwitchDescMapEntry.
// If the unique targets of a switch block are needed later, they will be recomputed, ensuring they're up-to-date.
struct SwitchUniqueSuccSet
{
- unsigned numDistinctSuccs; // Number of distinct targets of the switch.
- BasicBlock** nonDuplicates; // Array of "numDistinctSuccs", containing all the distinct switch target
- // successors.
+ unsigned numDistinctSuccs; // Number of distinct targets of the switch.
+ FlowEdge** nonDuplicates; // Array of "numDistinctSuccs", containing all the distinct switch target
+ // successor edges.
};
typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, SwitchUniqueSuccSet> BlockToSwitchDescMap;
@@ -5878,8 +5878,6 @@ public:
FlowEdge* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred, FlowEdge*** ptrToPred);
- FlowEdge* fgRemoveRefPred(BasicBlock* block, BasicBlock* blockPred);
-
void fgRemoveRefPred(FlowEdge* edge);
FlowEdge* fgRemoveAllRefPreds(BasicBlock* block, BasicBlock* blockPred);
@@ -5898,8 +5896,6 @@ public:
void fgReplaceJumpTarget(BasicBlock* block, BasicBlock* oldTarget, BasicBlock* newTarget);
- void fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred);
-
void fgReplacePred(FlowEdge* edge, BasicBlock* const newPred);
// initializingPreds is only 'true' when we are computing preds in fgLinkBasicBlocks()
@@ -6459,7 +6455,6 @@ private:
public:
GenTree* fgMorphInitBlock(GenTree* tree);
GenTree* fgMorphCopyBlock(GenTree* tree);
- GenTree* fgMorphStoreDynBlock(GenTreeStoreDynBlk* tree);
private:
GenTree* fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optAssertionPropDone = nullptr);
void fgTryReplaceStructLocalWithField(GenTree* tree);
@@ -7057,6 +7052,7 @@ protected:
unsigned optCSEstart; // The first local variable number that is a CSE
unsigned optCSEattempt; // The number of CSEs attempted so far.
unsigned optCSEcount; // The total count of CSEs introduced.
+ unsigned optCSEunmarks; // Number of CSE trees unmarked
weight_t optCSEweight; // The weight of the current block when we are doing PerformCSE
CSE_HeuristicCommon* optCSEheuristic; // CSE Heuristic to use for this method
@@ -7401,6 +7397,18 @@ public:
BasicBlock* basicBlock);
#endif
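+ // Induction variable optimization (IV widening): optInductionVariables is the
+ // phase driver; the helpers below judge profitability and rewrite IV uses.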
+ PhaseStatus optInductionVariables();
+ bool optCanSinkWidenedIV(unsigned lclNum, FlowGraphNaturalLoop* loop);
+ bool optIsIVWideningProfitable(unsigned lclNum,
+ BasicBlock* initBlock,
+ bool initedToConstant,
+ FlowGraphNaturalLoop* loop,
+ ArrayStack<Statement*>& ivUses);
+ void optBestEffortReplaceNarrowIVUses(
+ unsigned lclNum, unsigned ssaNum, unsigned newLclNum, BasicBlock* block, Statement* firstStmt);
+ void optReplaceWidenedIV(unsigned lclNum, unsigned ssaNum, unsigned newLclNum, Statement* stmt);
+ void optSinkWidenedIV(unsigned lclNum, unsigned newLclNum, FlowGraphNaturalLoop* loop);
+
// Redundant branch opts
//
PhaseStatus optRedundantBranches();
@@ -11314,6 +11322,7 @@ public:
case GT_PINVOKE_EPILOG:
case GT_IL_OFFSET:
case GT_NOP:
+ case GT_SWIFT_ERROR:
break;
// Lclvar unary operators
@@ -11441,28 +11450,6 @@ public:
break;
}
- case GT_STORE_DYN_BLK:
- {
- GenTreeStoreDynBlk* const dynBlock = node->AsStoreDynBlk();
-
- result = WalkTree(&dynBlock->gtOp1, dynBlock);
- if (result == fgWalkResult::WALK_ABORT)
- {
- return result;
- }
- result = WalkTree(&dynBlock->gtOp2, dynBlock);
- if (result == fgWalkResult::WALK_ABORT)
- {
- return result;
- }
- result = WalkTree(&dynBlock->gtDynamicSize, dynBlock);
- if (result == fgWalkResult::WALK_ABORT)
- {
- return result;
- }
- break;
- }
-
case GT_CALL:
{
GenTreeCall* const call = node->AsCall();
diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp
index 67f5c59d9326..05842d837395 100644
--- a/src/coreclr/jit/compiler.hpp
+++ b/src/coreclr/jit/compiler.hpp
@@ -664,27 +664,27 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func)
return VisitEHSuccs(comp, func);
case BBJ_CALLFINALLY:
- RETURN_ON_ABORT(func(bbTarget));
+ RETURN_ON_ABORT(func(GetTarget()));
return ::VisitEHSuccs</* skipJumpDest */ true, TFunc>(comp, this, func);
case BBJ_CALLFINALLYRET:
// These are "pseudo-blocks" and control never actually flows into them
// (codegen directly jumps to its successor after finally calls).
- return func(bbTarget);
+ return func(GetTarget());
case BBJ_EHCATCHRET:
case BBJ_EHFILTERRET:
case BBJ_LEAVE:
case BBJ_ALWAYS:
- RETURN_ON_ABORT(func(bbTarget));
+ RETURN_ON_ABORT(func(GetTarget()));
return VisitEHSuccs(comp, func);
case BBJ_COND:
- RETURN_ON_ABORT(func(bbFalseTarget));
+ RETURN_ON_ABORT(func(GetFalseTarget()));
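+ // When both targets of the BBJ_COND are the same block, the true and false
+ // edges are the same FlowEdge, so the successor is visited only once.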
- if (bbTrueTarget != bbFalseTarget)
+ if (!TrueEdgeIs(GetFalseEdge()))
{
- RETURN_ON_ABORT(func(bbTrueTarget));
+ RETURN_ON_ABORT(func(GetTrueTarget()));
}
return VisitEHSuccs(comp, func);
@@ -694,7 +694,7 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func)
Compiler::SwitchUniqueSuccSet sd = comp->GetDescriptorForSwitch(this);
for (unsigned i = 0; i < sd.numDistinctSuccs; i++)
{
- RETURN_ON_ABORT(func(sd.nonDuplicates[i]));
+ RETURN_ON_ABORT(func(sd.nonDuplicates[i]->getDestinationBlock()));
}
return VisitEHSuccs(comp, func);
@@ -744,14 +744,14 @@ BasicBlockVisit BasicBlock::VisitRegularSuccs(Compiler* comp, TFunc func)
case BBJ_EHFILTERRET:
case BBJ_LEAVE:
case BBJ_ALWAYS:
- return func(bbTarget);
+ return func(GetTarget());
case BBJ_COND:
- RETURN_ON_ABORT(func(bbFalseTarget));
+ RETURN_ON_ABORT(func(GetFalseTarget()));
- if (bbTrueTarget != bbFalseTarget)
+ if (!TrueEdgeIs(GetFalseEdge()))
{
- RETURN_ON_ABORT(func(bbTrueTarget));
+ RETURN_ON_ABORT(func(GetTrueTarget()));
}
return BasicBlockVisit::Continue;
@@ -761,7 +761,7 @@ BasicBlockVisit BasicBlock::VisitRegularSuccs(Compiler* comp, TFunc func)
Compiler::SwitchUniqueSuccSet sd = comp->GetDescriptorForSwitch(this);
for (unsigned i = 0; i < sd.numDistinctSuccs; i++)
{
- RETURN_ON_ABORT(func(sd.nonDuplicates[i]));
+ RETURN_ON_ABORT(func(sd.nonDuplicates[i]->getDestinationBlock()));
}
return BasicBlockVisit::Continue;
@@ -4246,6 +4246,7 @@ void GenTree::VisitOperands(TVisitor visitor)
case GT_PINVOKE_EPILOG:
case GT_IL_OFFSET:
case GT_NOP:
+ case GT_SWIFT_ERROR:
return;
// Unary operators with an optional operand
@@ -4361,21 +4362,6 @@ void GenTree::VisitOperands(TVisitor visitor)
return;
}
- case GT_STORE_DYN_BLK:
- {
- GenTreeStoreDynBlk* const dynBlock = this->AsStoreDynBlk();
- if (visitor(dynBlock->gtOp1) == VisitResult::Abort)
- {
- return;
- }
- if (visitor(dynBlock->gtOp2) == VisitResult::Abort)
- {
- return;
- }
- visitor(dynBlock->gtDynamicSize);
- return;
- }
-
case GT_CALL:
{
GenTreeCall* const call = this->AsCall();
diff --git a/src/coreclr/jit/compmemkind.h b/src/coreclr/jit/compmemkind.h
index 835d85f798d2..e986682894c3 100644
--- a/src/coreclr/jit/compmemkind.h
+++ b/src/coreclr/jit/compmemkind.h
@@ -50,6 +50,7 @@ CompMemKindMacro(LoopOpt)
CompMemKindMacro(LoopClone)
CompMemKindMacro(LoopUnroll)
CompMemKindMacro(LoopHoist)
+CompMemKindMacro(LoopIVOpts)
CompMemKindMacro(Unknown)
CompMemKindMacro(RangeCheck)
CompMemKindMacro(CopyProp)
diff --git a/src/coreclr/jit/compphases.h b/src/coreclr/jit/compphases.h
index 239309853197..10b60167be42 100644
--- a/src/coreclr/jit/compphases.h
+++ b/src/coreclr/jit/compphases.h
@@ -84,6 +84,7 @@ CompPhaseNameMacro(PHASE_BUILD_SSA_DF, "SSA: DF",
CompPhaseNameMacro(PHASE_BUILD_SSA_INSERT_PHIS, "SSA: insert phis", false, PHASE_BUILD_SSA, false)
CompPhaseNameMacro(PHASE_BUILD_SSA_RENAME, "SSA: rename", false, PHASE_BUILD_SSA, false)
CompPhaseNameMacro(PHASE_EARLY_PROP, "Early Value Propagation", false, -1, false)
+CompPhaseNameMacro(PHASE_OPTIMIZE_INDUCTION_VARIABLES, "Optimize Induction Variables", false, -1, false)
CompPhaseNameMacro(PHASE_VALUE_NUMBER, "Do value numbering", false, -1, false)
CompPhaseNameMacro(PHASE_OPTIMIZE_INDEX_CHECKS, "Optimize index checks", false, -1, false)
CompPhaseNameMacro(PHASE_OPTIMIZE_VALNUM_CSES, "Optimize Valnum CSEs", false, -1, false)
diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp
index ecbfe659be10..bb323584467e 100644
--- a/src/coreclr/jit/emit.cpp
+++ b/src/coreclr/jit/emit.cpp
@@ -8609,9 +8609,10 @@ void emitter::emitGCvarLiveSet(int offs, GCtype gcType, BYTE* addr, ssize_t disp
desc->vpdNext = nullptr;
-#if !defined(JIT32_GCENCODER) || !defined(FEATURE_EH_FUNCLETS)
/* the lower 2 bits encode props about the stk ptr */
+ CLANG_FORMAT_COMMENT_ANCHOR;
+#if defined(JIT32_GCENCODER) && !defined(FEATURE_EH_FUNCLETS)
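+ // Only the JIT32 GC encoder without funclets tracks the synchronized 'this'
+ // slot (emitSyncThisObjOffs) in these flag bits.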
if (offs == emitSyncThisObjOffs)
{
desc->vpdVarNum |= this_OFFSET_FLAG;
diff --git a/src/coreclr/jit/emitarm64.cpp b/src/coreclr/jit/emitarm64.cpp
index e65dc8c1a237..4f9cb501a341 100644
--- a/src/coreclr/jit/emitarm64.cpp
+++ b/src/coreclr/jit/emitarm64.cpp
@@ -1059,6 +1059,8 @@ void emitter::emitInsSanityCheck(instrDesc* id)
// (predicated)
case IF_SVE_AS_4A: // ........xx.mmmmm ...gggaaaaaddddd -- SVE integer multiply-add writing multiplicand
// (predicated)
+ case IF_SVE_GI_4A: // ........xx.mmmmm ...gggnnnnnddddd -- SVE2 histogram generation (vector)
+ case IF_SVE_HU_4A: // ........xx.mmmmm ...gggnnnnnddddd -- SVE floating-point multiply-accumulate writing addend
elemsize = id->idOpSize();
assert(insOptsScalableStandard(id->idInsOpt())); // xx
assert(isVectorRegister(id->idReg1())); // ddddd
@@ -1086,6 +1088,18 @@ void emitter::emitInsSanityCheck(instrDesc* id)
case IF_SVE_EO_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 saturating multiply-add long
case IF_SVE_EV_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE integer clamp
case IF_SVE_EX_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE permute vector elements (quadwords)
+ case IF_SVE_FL_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer add/subtract long
+ case IF_SVE_FM_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer add/subtract wide
+ case IF_SVE_FN_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer multiply long
+ case IF_SVE_FP_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 bitwise exclusive-or interleaved
+ case IF_SVE_FQ_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 bitwise permute
+ case IF_SVE_FS_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer add/subtract interleaved long
+ case IF_SVE_FW_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer absolute difference and accumulate
+ case IF_SVE_FX_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer absolute difference and accumulate long
+ case IF_SVE_GC_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer add/subtract narrow high part
+ case IF_SVE_GF_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 histogram generation (segment)
+ case IF_SVE_GW_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE FP clamp
+ case IF_SVE_HK_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE floating-point arithmetic (unpredicated)
elemsize = id->idOpSize();
assert(insOptsScalableStandard(id->idInsOpt())); // xx
assert(isVectorRegister(id->idReg1())); // ddddd
@@ -1105,6 +1119,23 @@ void emitter::emitInsSanityCheck(instrDesc* id)
assert(isValidScalarDatasize(elemsize));
break;
+ case IF_SVE_BH_3A: // .........x.mmmmm ....hhnnnnnddddd -- SVE address generation
+ assert(id->idInsOpt() == INS_OPTS_SCALABLE_S || id->idInsOpt() == INS_OPTS_SCALABLE_D);
+ assert(isVectorRegister(id->idReg1())); // ddddd
+ assert(isVectorRegister(id->idReg2())); // nnnnn
+ assert(isVectorRegister(id->idReg3())); // mmmmm
+ assert(isValidUimm2(emitGetInsSC(id))); // hh
+ break;
+
+ case IF_SVE_BH_3B: // ...........mmmmm ....hhnnnnnddddd -- SVE address generation
+ case IF_SVE_BH_3B_A: // ...........mmmmm ....hhnnnnnddddd -- SVE address generation
+ assert(id->idInsOpt() == INS_OPTS_SCALABLE_D_SXTW || id->idInsOpt() == INS_OPTS_SCALABLE_D_UXTW);
+ assert(isVectorRegister(id->idReg1())); // ddddd
+ assert(isVectorRegister(id->idReg2())); // nnnnn
+ assert(isVectorRegister(id->idReg3())); // mmmmm
+ assert(isValidUimm2(emitGetInsSC(id))); // hh
+ break;
+
case IF_SVE_BL_1A: // ............iiii ......pppppddddd -- SVE element count
elemsize = id->idOpSize();
assert(id->idInsOpt() == INS_OPTS_NONE);
@@ -1113,6 +1144,52 @@ void emitter::emitInsSanityCheck(instrDesc* id)
assert(isValidUimm4From1(emitGetInsSC(id)));
break;
+ case IF_SVE_CE_2A: // ................ ......nnnnn.DDDD -- SVE move predicate from vector
+ assert(isPredicateRegister(id->idReg1())); // DDDD
+ assert(isVectorRegister(id->idReg2())); // nnnnn
+ break;
+
+ case IF_SVE_CE_2B: // .........i...ii. ......nnnnn.DDDD -- SVE move predicate from vector
+ assert(isPredicateRegister(id->idReg1())); // DDDD
+ assert(isVectorRegister(id->idReg2())); // nnnnn
+ assert(isValidUimm<3>(emitGetInsSC(id)));
+ break;
+
+ case IF_SVE_CE_2C: // ..............i. ......nnnnn.DDDD -- SVE move predicate from vector
+ assert(isPredicateRegister(id->idReg1())); // DDDD
+ assert(isVectorRegister(id->idReg2())); // nnnnn
+ assert(isValidUimm<1>(emitGetInsSC(id))); // i
+ break;
+
+ case IF_SVE_CE_2D: // .............ii. ......nnnnn.DDDD -- SVE move predicate from vector
+ assert(isPredicateRegister(id->idReg1())); // DDDD
+ assert(isVectorRegister(id->idReg2())); // nnnnn
+ assert(isValidUimm<3>(emitGetInsSC(id))); // ii
+ break;
+
+ case IF_SVE_CF_2A: // ................ .......NNNNddddd -- SVE move predicate into vector
+ assert(isVectorRegister(id->idReg1())); // ddddd
+ assert(isPredicateRegister(id->idReg2())); // NNNN
+ break;
+
+ case IF_SVE_CF_2B: // .........i...ii. .......NNNNddddd -- SVE move predicate into vector
+ assert(isVectorRegister(id->idReg1())); // ddddd
+ assert(isPredicateRegister(id->idReg2())); // NNNN
+ assert(isValidUimm<3>(emitGetInsSC(id)));
+ break;
+
+ case IF_SVE_CF_2C: // ..............i. .......NNNNddddd -- SVE move predicate into vector
+ assert(isVectorRegister(id->idReg1())); // ddddd
+ assert(isPredicateRegister(id->idReg2())); // NNNN
+ assert(isValidUimm<1>(emitGetInsSC(id))); // i
+ break;
+
+ case IF_SVE_CF_2D: // .............ii. .......NNNNddddd -- SVE move predicate into vector
+ assert(isVectorRegister(id->idReg1())); // ddddd
+ assert(isPredicateRegister(id->idReg2())); // NNNN
+ assert(isValidUimm<2>(emitGetInsSC(id))); // ii
+ break;
+
case IF_SVE_CI_3A: // ........xx..MMMM .......NNNN.DDDD -- SVE permute predicate elements
elemsize = id->idOpSize();
assert(insOptsScalableStandard(id->idInsOpt()));
@@ -1176,15 +1253,40 @@ void emitter::emitInsSanityCheck(instrDesc* id)
assert(isValidUimm7(emitGetInsSC(id))); // iiiii
break;
- case IF_SVE_BR_3B: // ...........mmmmm ......nnnnnddddd -- SVE permute vector segments
- case IF_SVE_EW_3A: // ...........mmmmm ......nnnnnddddd -- SVE2 multiply-add (checked pointer)
- case IF_SVE_EW_3B: // ...........mmmmm ......aaaaaddddd -- SVE2 multiply-add (checked pointer)
+ case IF_SVE_BR_3B: // ...........mmmmm ......nnnnnddddd -- SVE permute vector segments
+ case IF_SVE_FN_3B: // ...........mmmmm ......nnnnnddddd -- SVE2 integer multiply long
+ case IF_SVE_FO_3A: // ...........mmmmm ......nnnnnddddd -- SVE integer matrix multiply accumulate
+ case IF_SVE_AT_3B: // ...........mmmmm ......nnnnnddddd -- SVE integer add/subtract vectors (unpredicated)
+ case IF_SVE_AU_3A: // ...........mmmmm ......nnnnnddddd -- SVE bitwise logical operations (unpredicated)
+ case IF_SVE_BD_3B: // ...........mmmmm ......nnnnnddddd -- SVE2 integer multiply vectors (unpredicated)
+ case IF_SVE_EF_3A: // ...........mmmmm ......nnnnnddddd -- SVE two-way dot product
+ case IF_SVE_EI_3A: // ...........mmmmm ......nnnnnddddd -- SVE mixed sign dot product
+ case IF_SVE_GJ_3A: // ...........mmmmm ......nnnnnddddd -- SVE2 crypto constructive binary operations
+ case IF_SVE_GN_3A: // ...........mmmmm ......nnnnnddddd -- SVE2 FP8 multiply-add long
+ case IF_SVE_GO_3A: // ...........mmmmm ......nnnnnddddd -- SVE2 FP8 multiply-add long long
+ case IF_SVE_GW_3B: // ...........mmmmm ......nnnnnddddd -- SVE FP clamp
+ case IF_SVE_HA_3A: // ...........mmmmm ......nnnnnddddd -- SVE BFloat16 floating-point dot product
+ case IF_SVE_HA_3A_E: // ...........mmmmm ......nnnnnddddd -- SVE BFloat16 floating-point dot product
+ case IF_SVE_HB_3A: // ...........mmmmm ......nnnnnddddd -- SVE floating-point multiply-add long
+ case IF_SVE_HD_3A: // ...........mmmmm ......nnnnnddddd -- SVE floating point matrix multiply accumulate
+ case IF_SVE_HD_3A_A: // ...........mmmmm ......nnnnnddddd -- SVE floating point matrix multiply accumulate
+ case IF_SVE_HK_3B: // ...........mmmmm ......nnnnnddddd -- SVE floating-point arithmetic (unpredicated)
+ case IF_SVE_AV_3A: // ...........mmmmm ......kkkkkddddd -- SVE2 bitwise ternary operations
assert(insOptsScalable(id->idInsOpt()));
assert(isVectorRegister(id->idReg1())); // ddddd
assert(isVectorRegister(id->idReg2())); // nnnnn/mmmmm
assert(isVectorRegister(id->idReg3())); // mmmmm/aaaaa
break;
+ case IF_SVE_HA_3A_F: // ...........mmmmm ......nnnnnddddd -- SVE BFloat16 floating-point dot product
+ case IF_SVE_EW_3A: // ...........mmmmm ......nnnnnddddd -- SVE2 multiply-add (checked pointer)
+ case IF_SVE_EW_3B: // ...........mmmmm ......aaaaaddddd -- SVE2 multiply-add (checked pointer)
+ assert(insOptsNone(id->idInsOpt()));
+ assert(isVectorRegister(id->idReg1())); // ddddd
+ assert(isVectorRegister(id->idReg2())); // nnnnn/aaaaa
+ assert(isVectorRegister(id->idReg3())); // mmmmm
+ break;
+
case IF_SVE_EG_3A: // ...........iimmm ......nnnnnddddd -- SVE two-way dot product (indexed)
case IF_SVE_EY_3A: // ...........iimmm ......nnnnnddddd -- SVE integer dot product (indexed)
case IF_SVE_EZ_3A: // ...........iimmm ......nnnnnddddd -- SVE mixed sign dot product (indexed)
@@ -1407,6 +1509,16 @@ void emitter::emitInsSanityCheck(instrDesc* id)
assert(isScalableVectorSize(elemsize));
break;
+ case IF_SVE_AB_3B: // ................ ...gggmmmmmddddd -- SVE integer add/subtract vectors (predicated)
+ case IF_SVE_HL_3B: // ................ ...gggmmmmmddddd -- SVE floating-point arithmetic (predicated)
+ elemsize = id->idOpSize();
+ assert(insOptsScalableStandard(id->idInsOpt()));
+ assert(isVectorRegister(id->idReg1())); // ddddd
+ assert(isLowPredicateRegister(id->idReg2())); // ggg
+ assert(isVectorRegister(id->idReg3())); // mmmmm
+ assert(isScalableVectorSize(elemsize));
+ break;
+
// Scalable to Simd Vector.
case IF_SVE_AG_3A: // ........xx...... ...gggnnnnnddddd -- SVE bitwise logical reduction (quadwords)
case IF_SVE_AJ_3A: // ........xx...... ...gggnnnnnddddd -- SVE integer add reduction (quadwords)
@@ -1503,6 +1615,19 @@ void emitter::emitInsSanityCheck(instrDesc* id)
assert(isVectorRegister(id->idReg3())); // nnnnn
break;
+ case IF_SVE_CW_4A: // ........xx.mmmmm ..VVVVnnnnnddddd -- SVE select vector elements (predicated)
+ elemsize = id->idOpSize();
+ assert(isScalableVectorSize(elemsize)); // xx
+ assert(insOptsScalableStandard(id->idInsOpt()));
+ assert(isVectorRegister(id->idReg1())); // ddddd
+ assert(isPredicateRegister(id->idReg2())); // VVVV
+ assert(isVectorRegister(id->idReg3())); // nnnnn
+ if (id->idIns() == INS_sve_sel)
+ {
+ assert(isVectorRegister(id->idReg4())); // mmmmm
+ }
+ break;
+
// Scalable from general scalar (possibly SP)
case IF_SVE_CQ_3A: // ........xx...... ...gggnnnnnddddd -- SVE copy general register to vector (predicated)
elemsize = id->idOpSize();
@@ -1608,6 +1733,21 @@ void emitter::emitInsSanityCheck(instrDesc* id)
// x
break;
+ case IF_SVE_BB_2A: // ...........nnnnn .....iiiiiiddddd -- SVE stack frame adjustment
+ assert(insOptsNone(id->idInsOpt()));
+ assert(id->idOpSize() == EA_8BYTE);
+ assert(isGeneralRegisterOrZR(id->idReg1())); // ddddd
+ assert(isGeneralRegisterOrZR(id->idReg2())); // nnnnn
+ assert(isValidSimm6(emitGetInsSC(id))); // iiiiii
+ break;
+
+ case IF_SVE_BC_1A: // ................ .....iiiiiiddddd -- SVE stack frame size
+ assert(insOptsNone(id->idInsOpt()));
+ assert(id->idOpSize() == EA_8BYTE);
+ assert(isGeneralRegister(id->idReg1())); // ddddd
+ assert(isValidSimm6(emitGetInsSC(id))); // iiiiii
+ break;
+
case IF_SVE_FR_2A: // .........x.xxiii ......nnnnnddddd -- SVE2 bitwise shift left long
{
assert(insOptsScalableWide(id->idInsOpt()));
@@ -2367,6 +2507,46 @@ void emitter::emitInsSanityCheck(instrDesc* id)
// iiiiii
break;
+ case IF_SVE_GG_3A: // ........ii.mmmmm ......nnnnnddddd -- SVE2 lookup table with 2-bit indices and 16-bit
+ // element size
+ assert(isVectorRegister(id->idReg1())); // ddddd
+ assert(isVectorRegister(id->idReg2())); // nnnnn
+ assert(isVectorRegister(id->idReg3())); // mmmmm
+ assert(isValidUimm2(emitGetInsSC(id))); // ii
+ assert(id->idInsOpt() == INS_OPTS_SCALABLE_B);
+ break;
+
+ case IF_SVE_GH_3B: // ........ii.mmmmm ......nnnnnddddd -- SVE2 lookup table with 4-bit indices and 16-bit
+ // element size
+ case IF_SVE_GH_3B_B: // ........ii.mmmmm ......nnnnnddddd -- SVE2 lookup table with 4-bit indices and 16-bit
+ // element size
+ assert(isVectorRegister(id->idReg1())); // ddddd
+ assert(isVectorRegister(id->idReg2())); // nnnnn
+ assert(isVectorRegister(id->idReg3())); // mmmmm
+ assert(isValidUimm2(emitGetInsSC(id))); // ii
+ assert(id->idInsOpt() == INS_OPTS_SCALABLE_H);
+ break;
+
+ case IF_SVE_GG_3B: // ........ii.mmmmm ...i..nnnnnddddd -- SVE2 lookup table with 2-bit indices and 16-bit
+ // element size
+ assert(isVectorRegister(id->idReg1())); // ddddd
+ assert(isVectorRegister(id->idReg2())); // nnnnn
+ assert(isVectorRegister(id->idReg3())); // mmmmm
+ assert(isValidUimm3(emitGetInsSC(id))); // ii
+ // i
+ assert(id->idInsOpt() == INS_OPTS_SCALABLE_H);
+ break;
+
+ case IF_SVE_GH_3A: // ........i..mmmmm ......nnnnnddddd -- SVE2 lookup table with 4-bit indices and 16-bit
+ // element size
+ assert(insOptsScalable(id->idInsOpt()));
+ assert(isVectorRegister(id->idReg1())); // ddddd
+ assert(isVectorRegister(id->idReg2())); // nnnnn
+ assert(isVectorRegister(id->idReg3())); // mmmmm
+ assert(isValidImm1(emitGetInsSC(id))); // i
+ assert(id->idInsOpt() == INS_OPTS_SCALABLE_B);
+ break;
+
case IF_SVE_HY_3A: // .........h.mmmmm ...gggnnnnn.oooo -- SVE 32-bit gather prefetch (scalar plus 32-bit scaled
// offsets)
case IF_SVE_HY_3A_A: // .........h.mmmmm ...gggnnnnn.oooo -- SVE 32-bit gather prefetch (scalar plus 32-bit
@@ -2497,6 +2677,93 @@ void emitter::emitInsSanityCheck(instrDesc* id)
assert(isGeneralRegister(id->idReg3()));
break;
+ case IF_SVE_BI_2A: // ................ ......nnnnnddddd -- SVE constructive prefix (unpredicated)
+ assert(insOptsNone(id->idInsOpt()));
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ break;
+
+ case IF_SVE_HH_2A: // ................ ......nnnnnddddd -- SVE2 FP8 upconverts
+ assert(id->idInsOpt() == INS_OPTS_SCALABLE_H);
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ break;
+
+ case IF_SVE_CB_2A: // ........xx...... ......nnnnnddddd -- SVE broadcast general register
+ assert(insOptsScalableStandard(id->idInsOpt()));
+ assert(isVectorRegister(id->idReg1()));
+ assert(isGeneralRegisterOrZR(id->idReg2())); // ZR is SP
+ break;
+
+ case IF_SVE_CG_2A: // ........xx...... ......nnnnnddddd -- SVE reverse vector elements
+ assert(insOptsScalableStandard(id->idInsOpt()));
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ break;
+
+ case IF_SVE_BJ_2A: // ........xx...... ......nnnnnddddd -- SVE floating-point exponential accelerator
+ case IF_SVE_CH_2A: // ........xx...... ......nnnnnddddd -- SVE unpack vector elements
+ case IF_SVE_HF_2A: // ........xx...... ......nnnnnddddd -- SVE floating-point reciprocal estimate (unpredicated)
+ assert(insOptsScalableAtLeastHalf(id->idInsOpt()));
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ break;
+
+ case IF_SVE_BF_2A: // ........xx.xxiii ......nnnnnddddd -- SVE bitwise shift by immediate (unpredicated)
+ case IF_SVE_FT_2A: // ........xx.xxiii ......nnnnnddddd -- SVE2 bitwise shift and insert
+ case IF_SVE_FU_2A: // ........xx.xxiii ......nnnnnddddd -- SVE2 bitwise shift right and accumulate
+ imm = emitGetInsSC(id);
+ elemsize = id->idOpSize();
+ assert(isValidVectorShiftAmount(imm, optGetSveElemsize(id->idInsOpt()),
+ emitInsIsVectorRightShift(id->idIns())));
+ assert(insOptsScalableStandard(id->idInsOpt()));
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ assert(isScalableVectorSize(elemsize));
+ break;
+
+ case IF_SVE_BX_2A: // ...........ixxxx ......nnnnnddddd -- sve_int_perm_dupq_i
+ imm = emitGetInsSC(id);
+ elemsize = id->idOpSize();
+ assert(insOptsScalableStandard(id->idInsOpt()));
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ assert(isScalableVectorSize(elemsize));
+#ifdef DEBUG
+ switch (id->idInsOpt())
+ {
+ case INS_OPTS_SCALABLE_B:
+ assert(isValidUimm4(imm));
+ break;
+
+ case INS_OPTS_SCALABLE_H:
+ assert(isValidUimm3(imm));
+ break;
+
+ case INS_OPTS_SCALABLE_S:
+ assert(isValidUimm2(imm));
+ break;
+
+ case INS_OPTS_SCALABLE_D:
+ assert(isValidImm1(imm));
+ break;
+
+ default:
+ break;
+ }
+#endif // DEBUG
+ break;
+
+ case IF_SVE_BY_2A: // ............iiii ......mmmmmddddd -- sve_int_perm_extq
+ imm = emitGetInsSC(id);
+ elemsize = id->idOpSize();
+ assert(id->idInsOpt() == INS_OPTS_SCALABLE_B);
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ assert(isScalableVectorSize(elemsize));
+ assert(isValidUimm4(imm));
+ break;
+
default:
printf("unexpected format %s\n", emitIfName(id->idInsFmt()));
assert(!"Unexpected format");
@@ -7551,6 +7818,15 @@ void emitter::emitIns_R_I(instruction ins,
}
break;
+ case INS_sve_rdvl:
+ assert(insOptsNone(opt));
+ assert(size == EA_8BYTE);
+ assert(isGeneralRegister(reg)); // ddddd
+ assert(isValidSimm6(imm)); // iiiiii
+ fmt = IF_SVE_BC_1A;
+ canEncode = true;
+ break;
+
case INS_sve_smax:
case INS_sve_smin:
signedImm = true;
@@ -8587,6 +8863,30 @@ void emitter::emitIns_R_R(instruction ins,
}
break;
+ case INS_sve_pmov:
+ if (opt != INS_OPTS_SCALABLE_B)
+ {
+ assert(insOptsScalableStandard(opt));
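+ // Non-byte element sizes take an index operand; route the index-0 form
+ // through emitIns_R_R_I.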
+ return emitIns_R_R_I(INS_sve_pmov, attr, reg1, reg2, 0, opt, sopt);
+ }
+ if (sopt == INS_SCALABLE_OPTS_TO_PREDICATE)
+ {
+ assert(isPredicateRegister(reg1));
+ assert(isVectorRegister(reg2));
+ fmt = IF_SVE_CE_2A;
+ }
+ else if (sopt == INS_SCALABLE_OPTS_TO_VECTOR)
+ {
+ assert(isVectorRegister(reg1));
+ assert(isPredicateRegister(reg2));
+ fmt = IF_SVE_CF_2A;
+ }
+ else
+ {
+ assert(!"invalid instruction");
+ }
+ break;
+
case INS_sve_movs:
{
assert(opt == INS_OPTS_SCALABLE_B);
@@ -8598,10 +8898,31 @@ void emitter::emitIns_R_R(instruction ins,
case INS_sve_mov:
{
- assert(opt == INS_OPTS_SCALABLE_B);
- assert(isPredicateRegister(reg1)); // dddd
- assert(isPredicateRegister(reg2)); // nnnn
- fmt = IF_SVE_CZ_4A_L;
+ if (isGeneralRegisterOrSP(reg2))
+ {
+ assert(insScalableOptsNone(sopt));
+ assert(insOptsScalableStandard(opt));
+ assert(isVectorRegister(reg1));
+#ifdef DEBUG
+ if (opt == INS_OPTS_SCALABLE_D)
+ {
+ assert(size == EA_8BYTE);
+ }
+ else
+ {
+ assert(size == EA_4BYTE);
+ }
+#endif // DEBUG
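+ // SP shares encoding 31 with ZR; normalize to ZR for emission.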
+ reg2 = encodingSPtoZR(reg2);
+ fmt = IF_SVE_CB_2A;
+ }
+ else
+ {
+ assert(opt == INS_OPTS_SCALABLE_B);
+ assert(isPredicateRegister(reg1)); // dddd
+ assert(isPredicateRegister(reg2)); // nnnn
+ fmt = IF_SVE_CZ_4A_L;
+ }
break;
}
@@ -8636,10 +8957,22 @@ void emitter::emitIns_R_R(instruction ins,
break;
case INS_sve_rev:
- assert(insOptsScalableStandard(opt));
- assert(isPredicateRegister(reg1)); // DDDD
- assert(isPredicateRegister(reg2)); // NNNN
- fmt = IF_SVE_CJ_2A;
+ if (sopt == INS_SCALABLE_OPTS_UNPREDICATED)
+ {
+ assert(insOptsScalableStandard(opt));
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isScalableVectorSize(size));
+ fmt = IF_SVE_CG_2A;
+ }
+ else
+ {
+ assert(insScalableOptsNone(sopt));
+ assert(insOptsScalableStandard(opt));
+ assert(isPredicateRegister(reg1)); // DDDD
+ assert(isPredicateRegister(reg2)); // NNNN
+ fmt = IF_SVE_CJ_2A;
+ }
break;
case INS_sve_ptest:
@@ -8761,6 +9094,86 @@ void emitter::emitIns_R_R(instruction ins,
fmt = IF_SVE_GK_2A;
break;
+ case INS_sve_frecpe:
+ case INS_sve_frsqrte:
+ assert(insScalableOptsNone(sopt));
+ assert(insOptsScalableAtLeastHalf(opt));
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isScalableVectorSize(size));
+ fmt = IF_SVE_HF_2A;
+ break;
+
+ case INS_sve_sunpkhi:
+ case INS_sve_sunpklo:
+ case INS_sve_uunpkhi:
+ case INS_sve_uunpklo:
+ assert(insScalableOptsNone(sopt));
+ assert(insOptsScalableAtLeastHalf(opt));
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isScalableVectorSize(size));
+ fmt = IF_SVE_CH_2A;
+ break;
+
+ case INS_sve_fexpa:
+ assert(insScalableOptsNone(sopt));
+ assert(insOptsScalableAtLeastHalf(opt));
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isScalableVectorSize(size));
+ fmt = IF_SVE_BJ_2A;
+ break;
+
+ case INS_sve_dup:
+ assert(insScalableOptsNone(sopt));
+ assert(insOptsScalableStandard(opt));
+ assert(isVectorRegister(reg1));
+ assert(isGeneralRegisterOrSP(reg2));
+#ifdef DEBUG
+ if (opt == INS_OPTS_SCALABLE_D)
+ {
+ assert(size == EA_8BYTE);
+ }
+ else
+ {
+ assert(size == EA_4BYTE);
+ }
+#endif // DEBUG
+ reg2 = encodingSPtoZR(reg2);
+ fmt = IF_SVE_CB_2A;
+
+ // DUP is an alias for MOV;
+ // MOV is the preferred disassembly
+ ins = INS_sve_mov;
+ break;
+
+ case INS_sve_bf1cvt:
+ case INS_sve_bf1cvtlt:
+ case INS_sve_bf2cvt:
+ case INS_sve_bf2cvtlt:
+ case INS_sve_f1cvt:
+ case INS_sve_f1cvtlt:
+ case INS_sve_f2cvt:
+ case INS_sve_f2cvtlt:
+ assert(insScalableOptsNone(sopt));
+ assert(opt == INS_OPTS_SCALABLE_H);
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isScalableVectorSize(size));
+ fmt = IF_SVE_HH_2A;
+ unreached(); // not supported yet
+ break;
+
+ case INS_sve_movprfx:
+ assert(insScalableOptsNone(sopt));
+ assert(insOptsNone(opt));
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isScalableVectorSize(size));
+ fmt = IF_SVE_BI_2A;
+ break;
+
default:
unreached();
break;
@@ -9484,11 +9897,87 @@ void emitter::emitIns_R_R_I(instruction ins,
case INS_sve_uqshl:
case INS_sve_asrd:
isRightShift = emitInsIsVectorRightShift(ins);
- assert(insOptsScalableStandard(opt));
- assert(isVectorRegister(reg1)); // ddddd
- assert(isLowPredicateRegister(reg2)); // ggg
assert(isValidVectorShiftAmount(imm, optGetSveElemsize(opt), isRightShift));
- fmt = IF_SVE_AM_2A;
+ assert(insOptsScalableStandard(opt));
+ assert(isScalableVectorSize(size));
+
+ if (sopt == INS_SCALABLE_OPTS_UNPREDICATED)
+ {
+ assert((ins == INS_sve_asr) || (ins == INS_sve_lsl) || (ins == INS_sve_lsr));
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ fmt = IF_SVE_BF_2A;
+ }
+ else
+ {
+ assert(insScalableOptsNone(sopt));
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isLowPredicateRegister(reg2)); // ggg
+ fmt = IF_SVE_AM_2A;
+ }
+ break;
+
+ case INS_sve_addvl:
+ case INS_sve_addpl:
+ assert(insOptsNone(opt));
+ assert(size == EA_8BYTE);
+ assert(isGeneralRegisterOrSP(reg1)); // ddddd
+ assert(isGeneralRegisterOrSP(reg2)); // nnnnn
+ assert(isValidSimm6(imm)); // iiiiii
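+ // Either operand may be SP, which shares encoding 31 with ZR.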
+ reg1 = encodingSPtoZR(reg1);
+ reg2 = encodingSPtoZR(reg2);
+ fmt = IF_SVE_BB_2A;
+ break;
+
+ case INS_sve_pmov:
+ if (sopt == INS_SCALABLE_OPTS_TO_PREDICATE)
+ {
+ assert(isPredicateRegister(reg1));
+ assert(isVectorRegister(reg2));
+ switch (opt)
+ {
+ case INS_OPTS_SCALABLE_D:
+ assert(isValidUimm<3>(imm));
+ fmt = IF_SVE_CE_2B;
+ break;
+ case INS_OPTS_SCALABLE_S:
+ assert(isValidUimm<2>(imm));
+ fmt = IF_SVE_CE_2D;
+ break;
+ case INS_OPTS_SCALABLE_H:
+ assert(isValidUimm<1>(imm));
+ fmt = IF_SVE_CE_2C;
+ break;
+ default:
+ unreached();
+ }
+ }
+ else if (sopt == INS_SCALABLE_OPTS_TO_VECTOR)
+ {
+ assert(isVectorRegister(reg1));
+ assert(isPredicateRegister(reg2));
+ switch (opt)
+ {
+ case INS_OPTS_SCALABLE_D:
+ assert(isValidUimm<3>(imm));
+ fmt = IF_SVE_CF_2B;
+ break;
+ case INS_OPTS_SCALABLE_S:
+ assert(isValidUimm<2>(imm));
+ fmt = IF_SVE_CF_2D;
+ break;
+ case INS_OPTS_SCALABLE_H:
+ assert(isValidUimm<1>(imm));
+ fmt = IF_SVE_CF_2C;
+ break;
+ default:
+ unreached();
+ }
+ }
+ else
+ {
+ unreached();
+ }
break;
case INS_sve_sqrshrn:
@@ -9658,6 +10147,74 @@ void emitter::emitIns_R_R_I(instruction ins,
}
break;
+ case INS_sve_sli:
+ case INS_sve_sri:
+ isRightShift = emitInsIsVectorRightShift(ins);
+ assert(isValidVectorShiftAmount(imm, optGetSveElemsize(opt), isRightShift));
+ assert(insOptsScalableStandard(opt));
+ assert(insScalableOptsNone(sopt));
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isScalableVectorSize(size));
+ fmt = IF_SVE_FT_2A;
+ break;
+
+ case INS_sve_srsra:
+ case INS_sve_ssra:
+ case INS_sve_ursra:
+ case INS_sve_usra:
+ isRightShift = emitInsIsVectorRightShift(ins);
+ assert(isValidVectorShiftAmount(imm, optGetSveElemsize(opt), isRightShift));
+ assert(insOptsScalableStandard(opt));
+ assert(insScalableOptsNone(sopt));
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isScalableVectorSize(size));
+ fmt = IF_SVE_FU_2A;
+ break;
+
+ case INS_sve_dupq:
+ assert(insOptsScalableStandard(opt));
+ assert(insScalableOptsNone(sopt));
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isScalableVectorSize(size));
+#ifdef DEBUG
+ switch (opt)
+ {
+ case INS_OPTS_SCALABLE_B:
+ assert(isValidUimm4(imm));
+ break;
+
+ case INS_OPTS_SCALABLE_H:
+ assert(isValidUimm3(imm));
+ break;
+
+ case INS_OPTS_SCALABLE_S:
+ assert(isValidUimm2(imm));
+ break;
+
+ case INS_OPTS_SCALABLE_D:
+ assert(isValidImm1(imm));
+ break;
+
+ default:
+ break;
+ }
+#endif // DEBUG
+ fmt = IF_SVE_BX_2A;
+ break;
+
+ case INS_sve_extq:
+ assert(opt == INS_OPTS_SCALABLE_B);
+ assert(insScalableOptsNone(sopt));
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isScalableVectorSize(size));
+ assert(isValidUimm4(imm));
+ fmt = IF_SVE_BY_2A;
+ break;
+
default:
unreached();
break;
@@ -10580,12 +11137,22 @@ void emitter::emitIns_R_R_R(instruction ins,
case INS_sve_bic:
case INS_sve_eor:
case INS_sve_orr:
- assert(isVectorRegister(reg1));
- assert(isLowPredicateRegister(reg2));
- assert(isVectorRegister(reg3));
assert(insOptsScalableStandard(opt));
- assert(insScalableOptsNone(sopt));
- fmt = IF_SVE_AA_3A;
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg3)); // mmmmm
+
+ if (sopt == INS_SCALABLE_OPTS_UNPREDICATED)
+ {
+ assert(opt == INS_OPTS_SCALABLE_D);
+ assert(isVectorRegister(reg2)); // nnnnn
+ fmt = IF_SVE_AU_3A;
+ }
+ else
+ {
+ assert(insScalableOptsNone(sopt));
+ assert(isLowPredicateRegister(reg2)); // ggg
+ fmt = IF_SVE_AA_3A;
+ }
break;
case INS_sve_add:
@@ -10608,6 +11175,26 @@ void emitter::emitIns_R_R_R(instruction ins,
}
break;
+ case INS_sve_addpt:
+ case INS_sve_subpt:
+ unreached(); // TODO-SVE: Not yet supported.
+ assert(opt == INS_OPTS_SCALABLE_D);
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg3)); // mmmmm
+
+ if (sopt == INS_SCALABLE_OPTS_UNPREDICATED)
+ {
+ assert(isVectorRegister(reg2)); // nnnnn
+ fmt = IF_SVE_AT_3B;
+ }
+ else
+ {
+ assert(insScalableOptsNone(sopt));
+ assert(isLowPredicateRegister(reg2)); // ggg
+ fmt = IF_SVE_AB_3B;
+ }
+ break;
+
case INS_sve_sdiv:
case INS_sve_sdivr:
case INS_sve_udiv:
@@ -10653,6 +11240,14 @@ void emitter::emitIns_R_R_R(instruction ins,
}
break;
+ case INS_sve_pmul:
+ assert(opt == INS_OPTS_SCALABLE_B);
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ fmt = IF_SVE_BD_3B;
+ break;
+
case INS_sve_andv:
case INS_sve_eorv:
case INS_sve_orv:
@@ -10842,12 +11437,28 @@ void emitter::emitIns_R_R_R(instruction ins,
case INS_sve_sdot:
case INS_sve_udot:
- assert(insOptsScalableWords(opt));
- assert(isVectorRegister(reg1)); // ddddd
- assert(isVectorRegister(reg2)); // nnnnn
- assert(isVectorRegister(reg3)); // mmmmm
- assert(isValidVectorElemsize(optGetSveElemsize(opt))); // xx
- fmt = IF_SVE_EH_3A;
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+
+ if (opt == INS_OPTS_SCALABLE_H)
+ {
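+ // Half-word sources select the two-way dot product form.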
+ fmt = IF_SVE_EF_3A;
+ }
+ else
+ {
+ fmt = IF_SVE_EH_3A;
+ assert(insOptsScalableWords(opt));
+ assert(isValidVectorElemsize(optGetSveElemsize(opt))); // xx
+ }
+ break;
+
+ case INS_sve_usdot:
+ assert(opt == INS_OPTS_SCALABLE_B);
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ fmt = IF_SVE_EI_3A;
break;
case INS_sve_smlalb:
@@ -10921,6 +11532,289 @@ void emitter::emitIns_R_R_R(instruction ins,
fmt = IF_SVE_EX_3A;
break;
+ case INS_sve_saddlb:
+ case INS_sve_saddlt:
+ case INS_sve_uaddlb:
+ case INS_sve_uaddlt:
+ case INS_sve_ssublb:
+ case INS_sve_ssublt:
+ case INS_sve_usublb:
+ case INS_sve_usublt:
+ case INS_sve_sabdlb:
+ case INS_sve_sabdlt:
+ case INS_sve_uabdlb:
+ case INS_sve_uabdlt:
+ assert(insOptsScalableAtLeastHalf(opt));
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ assert(isValidVectorElemsize(optGetSveElemsize(opt))); // xx
+ fmt = IF_SVE_FL_3A;
+ break;
+
+ case INS_sve_saddwb:
+ case INS_sve_saddwt:
+ case INS_sve_uaddwb:
+ case INS_sve_uaddwt:
+ case INS_sve_ssubwb:
+ case INS_sve_ssubwt:
+ case INS_sve_usubwb:
+ case INS_sve_usubwt:
+ assert(insOptsScalableAtLeastHalf(opt));
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ assert(isValidVectorElemsize(optGetSveElemsize(opt))); // xx
+ fmt = IF_SVE_FM_3A;
+ break;
+
+ case INS_sve_smullb:
+ case INS_sve_smullt:
+ case INS_sve_umullb:
+ case INS_sve_umullt:
+ case INS_sve_sqdmullb:
+ case INS_sve_sqdmullt:
+ assert(insOptsScalableAtLeastHalf(opt));
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ assert(isValidVectorElemsize(optGetSveElemsize(opt))); // xx
+ fmt = IF_SVE_FN_3A;
+ break;
+
+ case INS_sve_pmullb:
+ case INS_sve_pmullt:
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+
+ if (opt == INS_OPTS_SCALABLE_Q)
+ {
+ fmt = IF_SVE_FN_3B;
+ }
+ else
+ {
+ assert((opt == INS_OPTS_SCALABLE_H) || (opt == INS_OPTS_SCALABLE_D));
+ assert(isValidVectorElemsize(optGetSveElemsize(opt))); // xx
+ fmt = IF_SVE_FN_3A;
+ }
+ break;
+
+ case INS_sve_smmla:
+ case INS_sve_usmmla:
+ case INS_sve_ummla:
+ assert(opt == INS_OPTS_SCALABLE_S);
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ fmt = IF_SVE_FO_3A;
+ break;
+
+ case INS_sve_rax1:
+ case INS_sve_sm4ekey:
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+
+ if (ins == INS_sve_rax1)
+ {
+ assert(opt == INS_OPTS_SCALABLE_D);
+ }
+ else
+ {
+ assert(opt == INS_OPTS_SCALABLE_S);
+ }
+
+ fmt = IF_SVE_GJ_3A;
+ break;
+
+ case INS_sve_fmlalb:
+ case INS_sve_fmlalt:
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+
+ if (opt == INS_OPTS_SCALABLE_B)
+ {
+ unreached(); // TODO-SVE: Not yet supported.
+ fmt = IF_SVE_GN_3A;
+ }
+ else
+ {
+ assert(opt == INS_OPTS_SCALABLE_H);
+ fmt = IF_SVE_HB_3A;
+ }
+ break;
+
+ case INS_sve_fmlslb:
+ case INS_sve_fmlslt:
+ case INS_sve_bfmlalb:
+ case INS_sve_bfmlalt:
+ case INS_sve_bfmlslb:
+ case INS_sve_bfmlslt:
+ assert(opt == INS_OPTS_SCALABLE_H);
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ fmt = IF_SVE_HB_3A;
+ break;
+
+ case INS_sve_bfmmla:
+ assert(opt == INS_OPTS_SCALABLE_H);
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ fmt = IF_SVE_HD_3A;
+ break;
+
+ case INS_sve_fmmla:
+ unreached(); // TODO-SVE: Not yet supported.
+ assert(opt == INS_OPTS_SCALABLE_D);
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ fmt = IF_SVE_HD_3A_A;
+ break;
+
+ case INS_sve_fmlallbb:
+ case INS_sve_fmlallbt:
+ case INS_sve_fmlalltb:
+ case INS_sve_fmlalltt:
+ unreached(); // TODO-SVE: Not yet supported.
+ assert(opt == INS_OPTS_SCALABLE_B);
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ fmt = IF_SVE_GO_3A;
+ break;
+
+ case INS_sve_bfclamp:
+ assert(opt == INS_OPTS_SCALABLE_H);
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ fmt = IF_SVE_GW_3B;
+ break;
+
+ case INS_sve_bfdot:
+ assert(opt == INS_OPTS_SCALABLE_H);
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ fmt = IF_SVE_HA_3A;
+ break;
+
+ case INS_sve_fdot:
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+
+ if (opt == INS_OPTS_SCALABLE_H)
+ {
+ fmt = IF_SVE_HA_3A;
+ }
+ else if (opt == INS_OPTS_SCALABLE_B)
+ {
+ unreached(); // TODO-SVE: Not yet supported.
+ fmt = IF_SVE_HA_3A_E;
+ }
+ else
+ {
+ unreached(); // TODO-SVE: Not yet supported.
+ assert(insOptsNone(opt));
+ fmt = IF_SVE_HA_3A_F;
+ }
+ break;
+
+ case INS_sve_eorbt:
+ case INS_sve_eortb:
+ assert(insOptsScalableStandard(opt));
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ assert(isValidVectorElemsize(optGetSveElemsize(opt))); // xx
+ fmt = IF_SVE_FP_3A;
+ break;
+
+ case INS_sve_bext:
+ case INS_sve_bdep:
+ case INS_sve_bgrp:
+ assert(insOptsScalableStandard(opt));
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ assert(isValidVectorElemsize(optGetSveElemsize(opt))); // xx
+ fmt = IF_SVE_FQ_3A;
+ break;
+
+ case INS_sve_saddlbt:
+ case INS_sve_ssublbt:
+ case INS_sve_ssubltb:
+ assert(insOptsScalableAtLeastHalf(opt));
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ assert(isValidVectorElemsize(optGetSveElemsize(opt))); // xx
+ fmt = IF_SVE_FS_3A;
+ break;
+
+ case INS_sve_saba:
+ case INS_sve_uaba:
+ assert(insOptsScalableStandard(opt));
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ assert(isValidVectorElemsize(optGetSveElemsize(opt))); // xx
+ fmt = IF_SVE_FW_3A;
+ break;
+
+ case INS_sve_sabalb:
+ case INS_sve_sabalt:
+ case INS_sve_uabalb:
+ case INS_sve_uabalt:
+ assert(insOptsScalableAtLeastHalf(opt));
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ assert(isValidVectorElemsize(optGetSveElemsize(opt))); // xx
+ fmt = IF_SVE_FX_3A;
+ break;
+
+ case INS_sve_addhnb:
+ case INS_sve_addhnt:
+ case INS_sve_raddhnb:
+ case INS_sve_raddhnt:
+ case INS_sve_subhnb:
+ case INS_sve_subhnt:
+ case INS_sve_rsubhnb:
+ case INS_sve_rsubhnt:
+ unreached(); // TODO-SVE: Not yet supported.
+ assert(insOptsScalableWide(opt));
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ assert(isValidVectorElemsize(optGetSveElemsize(opt))); // xx
+ fmt = IF_SVE_GC_3A;
+ break;
+
+ case INS_sve_histseg:
+ assert(opt == INS_OPTS_SCALABLE_B);
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ assert(isValidVectorElemsize(optGetSveElemsize(opt))); // xx
+ fmt = IF_SVE_GF_3A;
+ break;
+
+ case INS_sve_fclamp:
+ assert(insOptsScalableAtLeastHalf(opt));
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ assert(isValidVectorElemsize(optGetSveElemsize(opt))); // xx
+ fmt = IF_SVE_GW_3A;
+ break;
+
case INS_sve_clz:
case INS_sve_cls:
case INS_sve_cnt:
@@ -11067,14 +11961,35 @@ void emitter::emitIns_R_R_R(instruction ins,
// TODO-SVE: Following checks can be simplified to check reg1 as predicate register only after adding
// definitions for predicate registers. Currently, predicate registers P0 to P15 are aliased to simd
// registers V0 to V15.
- if (isPredicateRegister(reg3) &&
- (sopt == INS_SCALABLE_OPTS_NONE || sopt == INS_SCALABLE_OPTS_PREDICATE_MERGE))
+ if (sopt == INS_SCALABLE_OPTS_UNPREDICATED)
+ {
+ assert(ins == INS_sve_mov);
+ assert(opt == INS_OPTS_SCALABLE_D);
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ fmt = IF_SVE_AU_3A;
+ // ORR is an alias for MOV, and is always the preferred disassembly.
+ ins = INS_sve_orr;
+ }
+ else if (isPredicateRegister(reg3) &&
+ (sopt == INS_SCALABLE_OPTS_NONE || sopt == INS_SCALABLE_OPTS_PREDICATE_MERGE))
{
assert(opt == INS_OPTS_SCALABLE_B);
assert(isPredicateRegister(reg1)); // DDDD
assert(isPredicateRegister(reg2)); // gggg
assert(isPredicateRegister(reg3)); // NNNN
fmt = sopt == INS_SCALABLE_OPTS_NONE ? IF_SVE_CZ_4A : IF_SVE_CZ_4A_K;
+ // MOV is an alias for CPY, and is always the preferred disassembly.
+ ins = INS_sve_mov;
+ }
+ else if (sopt == INS_SCALABLE_OPTS_PREDICATE_MERGE)
+ {
+ assert(isVectorRegister(reg1));
+ assert(isPredicateRegister(reg2));
+ assert(isVectorRegister(reg3));
+ assert(insOptsScalableStandard(opt));
+ fmt = IF_SVE_CW_4A;
}
else
{
@@ -11092,10 +12007,10 @@ void emitter::emitIns_R_R_R(instruction ins,
assert(isVectorRegister(reg3));
fmt = IF_SVE_CP_3A;
}
- }
- // MOV is an alias for CPY, and is always the preferred disassembly.
- ins = INS_sve_mov;
+ // MOV is an alias for CPY, and is always the preferred disassembly.
+ ins = INS_sve_mov;
+ }
break;
case INS_sve_lasta:
@@ -11383,23 +12298,52 @@ void emitter::emitIns_R_R_R(instruction ins,
fmt = IF_SVE_HJ_3A;
break;
- case INS_sve_fabd:
+ case INS_sve_frecps:
+ case INS_sve_frsqrts:
+ case INS_sve_ftsmul:
+ assert(insOptsScalableAtLeastHalf(opt));
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ assert(isValidVectorElemsize(optGetSveElemsize(opt))); // xx
+ fmt = IF_SVE_HK_3A;
+ break;
+
case INS_sve_fadd:
+ case INS_sve_fsub:
+ case INS_sve_fmul:
+ assert(insOptsScalableAtLeastHalf(opt));
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg3)); // mmmmm
+ assert(isValidVectorElemsize(optGetSveElemsize(opt))); // xx
+
+ if (sopt == INS_SCALABLE_OPTS_UNPREDICATED)
+ {
+ assert(isVectorRegister(reg2)); // nnnnn
+ fmt = IF_SVE_HK_3A;
+ }
+ else
+ {
+ assert(insScalableOptsNone(sopt));
+ assert(isLowPredicateRegister(reg2)); // ggg
+ fmt = IF_SVE_HL_3A;
+ }
+ break;
+
+ case INS_sve_fabd:
case INS_sve_fdiv:
case INS_sve_fdivr:
case INS_sve_fmax:
case INS_sve_fmaxnm:
case INS_sve_fmin:
case INS_sve_fminnm:
- case INS_sve_fmul:
case INS_sve_fmulx:
case INS_sve_fscale:
- case INS_sve_fsub:
case INS_sve_fsubr:
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
assert(isVectorRegister(reg3));
- assert(insOptsScalableFloat(opt));
+ assert(insOptsScalableAtLeastHalf(opt));
assert(insScalableOptsNone(sopt));
fmt = IF_SVE_HL_3A;
break;
@@ -11415,6 +12359,43 @@ void emitter::emitIns_R_R_R(instruction ins,
fmt = IF_SVE_HL_3A;
break;
+ case INS_sve_bfmul:
+ case INS_sve_bfadd:
+ case INS_sve_bfsub:
+ case INS_sve_bfmaxnm:
+ case INS_sve_bfminnm:
+ case INS_sve_bfmax:
+ case INS_sve_bfmin:
+ assert(opt == INS_OPTS_SCALABLE_H);
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg3)); // mmmmm
+
+ if (sopt == INS_SCALABLE_OPTS_UNPREDICATED)
+ {
+ assert(isVectorRegister(reg2)); // nnnnn
+ fmt = IF_SVE_HK_3B;
+ }
+ else
+ {
+ assert(insScalableOptsNone(sopt));
+ assert(isLowPredicateRegister(reg2)); // ggg
+ fmt = IF_SVE_HL_3B;
+ }
+ break;
+
+ case INS_sve_bsl:
+ case INS_sve_eor3:
+ case INS_sve_bcax:
+ case INS_sve_bsl1n:
+ case INS_sve_bsl2n:
+ case INS_sve_nbsl:
+ assert(opt == INS_OPTS_SCALABLE_D);
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // mmmmm
+ assert(isVectorRegister(reg3)); // kkkkk
+ fmt = IF_SVE_AV_3A;
+ break;
+
case INS_sve_frintn:
case INS_sve_frintm:
case INS_sve_frintp:
@@ -11514,9 +12495,6 @@ void emitter::emitIns_R_R_R(instruction ins,
assert(isVectorRegister(reg2)); // nnnnn
assert(isVectorRegister(reg3)); // mmmmm
fmt = IF_SVE_EW_3A;
-
- // opt is set only for convenience in emitDispInsHelp
- opt = INS_OPTS_SCALABLE_D;
break;
case INS_sve_madpt:
@@ -11526,9 +12504,6 @@ void emitter::emitIns_R_R_R(instruction ins,
assert(isVectorRegister(reg2)); // mmmmm
assert(isVectorRegister(reg3)); // aaaaa
fmt = IF_SVE_EW_3B;
-
- // opt is set only for convenience in emitDispInsHelp
- opt = INS_OPTS_SCALABLE_D;
break;
case INS_sve_fcmeq:
@@ -11722,14 +12697,15 @@ void emitter::emitIns_R_R_R_I_LdStPair(instruction ins,
* Add an instruction referencing three registers and a constant.
*/
-void emitter::emitIns_R_R_R_I(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- ssize_t imm,
- insOpts opt /* = INS_OPTS_NONE */,
- emitAttr attrReg2 /* = EA_UNKNOWN */)
+void emitter::emitIns_R_R_R_I(instruction ins,
+ emitAttr attr,
+ regNumber reg1,
+ regNumber reg2,
+ regNumber reg3,
+ ssize_t imm,
+ insOpts opt /* = INS_OPTS_NONE */,
+ emitAttr attrReg2 /* = EA_UNKNOWN */,
+ insScalableOpts sopt /* = INS_SCALABLE_OPTS_NONE */)
{
emitAttr size = EA_SIZE(attr);
emitAttr elemsize = EA_UNKNOWN;
@@ -12019,10 +12995,12 @@ void emitter::emitIns_R_R_R_I(instruction ins,
default:
// fallback to emit SVE instructions.
- return emitInsSve_R_R_R_I(ins, attr, reg1, reg2, reg3, imm, opt, attrReg2);
+ return emitInsSve_R_R_R_I(ins, attr, reg1, reg2, reg3, imm, opt, sopt);
} // end switch (ins)
+ assert(insScalableOptsNone(sopt));
+
if (isLdSt)
{
assert(!isAddSub);
@@ -12181,14 +13159,14 @@ void emitter::emitIns_R_R_R_I(instruction ins,
* Add a SVE instruction referencing three registers and a constant.
*/
-void emitter::emitInsSve_R_R_R_I(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- ssize_t imm,
- insOpts opt /* = INS_OPTS_NONE */,
- emitAttr attrReg2 /* = EA_UNKNOWN */)
+void emitter::emitInsSve_R_R_R_I(instruction ins,
+ emitAttr attr,
+ regNumber reg1,
+ regNumber reg2,
+ regNumber reg3,
+ ssize_t imm,
+ insOpts opt /* = INS_OPTS_NONE */,
+ insScalableOpts sopt /* = INS_SCALABLE_OPTS_NONE */)
{
emitAttr size = EA_SIZE(attr);
emitAttr elemsize = EA_UNKNOWN;
@@ -12197,12 +13175,37 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
/* Figure out the encoding format of the instruction */
switch (ins)
{
+ case INS_sve_adr:
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+ assert(isValidUimm2(imm));
+ switch (opt)
+ {
+ case INS_OPTS_SCALABLE_S:
+ case INS_OPTS_SCALABLE_D:
+ assert(sopt == INS_SCALABLE_OPTS_LSL_N);
+ fmt = IF_SVE_BH_3A;
+ break;
+ case INS_OPTS_SCALABLE_D_SXTW:
+ fmt = IF_SVE_BH_3B;
+ break;
+ case INS_OPTS_SCALABLE_D_UXTW:
+ fmt = IF_SVE_BH_3B_A;
+ break;
+ default:
+ assert(!"invalid instruction");
+ break;
+ }
+ break;
+
case INS_sve_cmpeq:
case INS_sve_cmpgt:
case INS_sve_cmpge:
case INS_sve_cmpne:
case INS_sve_cmple:
case INS_sve_cmplt:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableStandard(opt));
assert(isPredicateRegister(reg1)); // DDDD
assert(isLowPredicateRegister(reg2)); // ggg
@@ -12215,6 +13218,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_cmphs:
case INS_sve_cmplo:
case INS_sve_cmpls:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableStandard(opt));
assert(isPredicateRegister(reg1)); // DDDD
assert(isLowPredicateRegister(reg2)); // ggg
@@ -12225,6 +13229,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_sdot:
case INS_sve_udot:
+ assert(insScalableOptsNone(sopt));
assert(isVectorRegister(reg1)); // ddddd
assert(isVectorRegister(reg2)); // nnnnn
assert(isLowVectorRegister(reg3)); // mmmm
@@ -12252,6 +13257,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_usdot:
case INS_sve_sudot:
+ assert(insScalableOptsNone(sopt));
assert(opt == INS_OPTS_SCALABLE_B);
assert(isVectorRegister(reg1)); // ddddd
assert(isVectorRegister(reg2)); // nnnnn
@@ -12262,6 +13268,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_mul:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableAtLeastHalf(opt));
assert(isVectorRegister(reg1)); // ddddd
assert(isVectorRegister(reg2)); // nnnnn
@@ -12293,6 +13300,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_cdot:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableWords(opt));
assert(isVectorRegister(reg1)); // ddddd
assert(isVectorRegister(reg2)); // nnnnn
@@ -12307,6 +13315,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_cmla:
case INS_sve_sqrdcmlah:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableStandard(opt));
assert(isVectorRegister(reg1)); // ddddd
assert(isVectorRegister(reg2)); // nnnnn
@@ -12320,6 +13329,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_ld1d:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalable(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12347,6 +13357,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_ldff1d:
+ assert(insScalableOptsNone(sopt));
assert(opt == INS_OPTS_SCALABLE_D);
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12356,6 +13367,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_ld1w:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableWordsOrQuadwords(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12375,6 +13387,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_ld1sw:
+ assert(insScalableOptsNone(sopt));
assert(opt == INS_OPTS_SCALABLE_D);
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12393,6 +13406,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_ldff1sw:
+ assert(insScalableOptsNone(sopt));
assert(opt == INS_OPTS_SCALABLE_D);
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12402,6 +13416,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_ld1sb:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableAtLeastHalf(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12422,6 +13437,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_ld1b:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableStandard(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12442,6 +13458,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_ldff1b:
case INS_sve_ldff1sb:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableWords(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12451,6 +13468,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_ld1sh:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableWords(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12469,6 +13487,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_ld1h:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableAtLeastHalf(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12488,6 +13507,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_ldff1h:
case INS_sve_ldff1sh:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableWords(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12497,6 +13517,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_ldff1w:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableWords(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12507,6 +13528,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_ldnf1sw:
case INS_sve_ldnf1d:
+ assert(insScalableOptsNone(sopt));
assert(opt == INS_OPTS_SCALABLE_D);
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12517,6 +13539,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_ldnf1sh:
case INS_sve_ldnf1w:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableWords(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12527,6 +13550,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_ldnf1h:
case INS_sve_ldnf1sb:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableAtLeastHalf(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12536,6 +13560,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_ldnf1b:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableStandard(opt));
assert(isVectorRegister(reg1));
assert(isPredicateRegister(reg2));
@@ -12548,6 +13573,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_ldnt1h:
case INS_sve_ldnt1w:
case INS_sve_ldnt1d:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableStandard(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12590,6 +13616,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_ld1row:
case INS_sve_ld1rqd:
case INS_sve_ld1rod:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableStandard(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12651,6 +13678,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_ld2q:
case INS_sve_ld3q:
case INS_sve_ld4q:
+ assert(insScalableOptsNone(sopt));
assert(opt == INS_OPTS_SCALABLE_Q);
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12692,6 +13720,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_ld2d:
case INS_sve_ld3d:
case INS_sve_ld4d:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableStandard(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12764,6 +13793,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_st2q:
case INS_sve_st3q:
case INS_sve_st4q:
+ assert(insScalableOptsNone(sopt));
assert(opt == INS_OPTS_SCALABLE_Q);
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12797,6 +13827,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_stnt1h:
case INS_sve_stnt1w:
case INS_sve_stnt1d:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableStandard(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12833,6 +13864,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_st1w:
case INS_sve_st1d:
+ assert(insScalableOptsNone(sopt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12895,6 +13927,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_st2d:
case INS_sve_st3d:
case INS_sve_st4d:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableStandard(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -12966,6 +13999,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_st1b:
case INS_sve_st1h:
+ assert(insScalableOptsNone(sopt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -13004,6 +14038,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_fmla:
case INS_sve_fmls:
+ assert(insScalableOptsNone(sopt));
assert(isVectorRegister(reg1)); // ddddd
assert(isVectorRegister(reg2)); // nnnnn
assert(isLowVectorRegister(reg3)); // mmmm
@@ -13024,6 +14059,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_bfmla:
case INS_sve_bfmls:
+ assert(insScalableOptsNone(sopt));
assert(opt == INS_OPTS_SCALABLE_H);
assert(isVectorRegister(reg1)); // ddddd
assert(isVectorRegister(reg2)); // nnnnn
@@ -13034,6 +14070,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_fmul:
+ assert(insScalableOptsNone(sopt));
assert(isVectorRegister(reg1)); // ddddd
assert(isVectorRegister(reg2)); // nnnnn
assert(isLowVectorRegister(reg3)); // mmmm
@@ -13053,6 +14090,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_bfmul:
+ assert(insScalableOptsNone(sopt));
assert(opt == INS_OPTS_SCALABLE_H);
assert(isVectorRegister(reg1)); // ddddd
assert(isVectorRegister(reg2)); // nnnnn
@@ -13063,6 +14101,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_fdot:
+ assert(insScalableOptsNone(sopt));
assert(isVectorRegister(reg1)); // ddddd
assert(isVectorRegister(reg2)); // nnnnn
assert(isVectorRegister(reg3)); // mmm
@@ -13093,6 +14132,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_bfdot:
+ assert(insScalableOptsNone(sopt));
assert(opt == INS_OPTS_SCALABLE_H);
assert(isVectorRegister(reg1)); // ddddd
assert(isVectorRegister(reg2)); // nnnnn
@@ -13104,6 +14144,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_mla:
case INS_sve_mls:
+ assert(insScalableOptsNone(sopt));
assert(isVectorRegister(reg1)); // ddddd
assert(isVectorRegister(reg2)); // nnnnn
assert(isLowVectorRegister(reg3)); // mmmm
@@ -13132,6 +14173,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_smullt:
case INS_sve_umullb:
case INS_sve_umullt:
+ assert(insScalableOptsNone(sopt));
assert(isVectorRegister(reg1)); // ddddd
assert(isVectorRegister(reg2)); // nnnnn
assert(isLowVectorRegister(reg3)); // mmmm
@@ -13158,6 +14200,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_smlslt:
case INS_sve_umlslb:
case INS_sve_umlslt:
+ assert(insScalableOptsNone(sopt));
assert(isVectorRegister(reg1)); // ddddd
assert(isVectorRegister(reg2)); // nnnnn
assert(isLowVectorRegister(reg3)); // mmmm
@@ -13178,6 +14221,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_sqdmullb:
case INS_sve_sqdmullt:
+ assert(insScalableOptsNone(sopt));
assert(isVectorRegister(reg1)); // ddddd
assert(isVectorRegister(reg2)); // nnnnn
assert(isLowVectorRegister(reg3)); // mmmm
@@ -13198,6 +14242,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_sqdmulh:
case INS_sve_sqrdmulh:
+ assert(insScalableOptsNone(sopt));
assert(isVectorRegister(reg1)); // ddddd
assert(isVectorRegister(reg2)); // nnnnn
assert(isLowVectorRegister(reg3)); // mmmm
@@ -13226,6 +14271,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_sqdmlalt:
case INS_sve_sqdmlslb:
case INS_sve_sqdmlslt:
+ assert(insScalableOptsNone(sopt));
assert(isVectorRegister(reg1)); // ddddd
assert(isVectorRegister(reg2)); // nnnnn
assert(isLowVectorRegister(reg3)); // mmmm
@@ -13246,6 +14292,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_sqrdmlah:
case INS_sve_sqrdmlsh:
+ assert(insScalableOptsNone(sopt));
assert(isVectorRegister(reg1)); // ddddd
assert(isVectorRegister(reg2)); // nnnnn
assert(isLowVectorRegister(reg3)); // mmmm
@@ -13271,6 +14318,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_fcadd:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableAtLeastHalf(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -13281,6 +14329,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_ld1rd:
+ assert(insScalableOptsNone(sopt));
assert(opt == INS_OPTS_SCALABLE_D);
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -13290,6 +14339,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_ld1rsw:
+ assert(insScalableOptsNone(sopt));
assert(opt == INS_OPTS_SCALABLE_D);
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -13299,6 +14349,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_ld1rsh:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableWords(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -13308,6 +14359,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_ld1rw:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableWords(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -13317,6 +14369,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_ld1rh:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableAtLeastHalf(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -13326,6 +14379,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_ld1rsb:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableAtLeastHalf(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -13335,6 +14389,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
break;
case INS_sve_ld1rb:
+ assert(insScalableOptsNone(sopt));
assert(insOptsScalableStandard(opt));
assert(isVectorRegister(reg1));
assert(isLowPredicateRegister(reg2));
@@ -13351,6 +14406,7 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
case INS_sve_bfmlalt:
case INS_sve_bfmlslb:
case INS_sve_bfmlslt:
+ assert(insScalableOptsNone(sopt));
assert(opt == INS_OPTS_SCALABLE_H);
assert(isVectorRegister(reg1)); // ddddd
assert(isVectorRegister(reg2)); // nnnnn
@@ -13360,6 +14416,55 @@ void emitter::emitInsSve_R_R_R_I(instruction ins,
fmt = IF_SVE_GZ_3A;
break;
+ case INS_sve_luti2:
+ assert(insScalableOptsNone(sopt));
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+
+ if (opt == INS_OPTS_SCALABLE_H)
+ {
+ assert(isValidUimm3(imm)); // iii
+ fmt = IF_SVE_GG_3B;
+ }
+ else
+ {
+ assert(opt == INS_OPTS_SCALABLE_B);
+ assert(isValidUimm2(imm)); // ii
+ fmt = IF_SVE_GG_3A;
+ }
+ break;
+
+ case INS_sve_luti4:
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isVectorRegister(reg2)); // nnnnn
+ assert(isVectorRegister(reg3)); // mmmmm
+
+ if (opt == INS_OPTS_SCALABLE_H)
+ {
+ assert(isValidUimm2(imm));
+
+ if (sopt == INS_SCALABLE_OPTS_WITH_VECTOR_PAIR)
+ {
+ fmt = IF_SVE_GH_3B;
+ }
+ else
+ {
+ assert(insScalableOptsNone(sopt));
+ fmt = IF_SVE_GH_3B_B;
+ }
+ }
+ else
+ {
+ assert(opt == INS_OPTS_SCALABLE_B);
+ assert(insScalableOptsNone(sopt));
+ assert(isValidImm1(imm)); // i
+ fmt = IF_SVE_GH_3A;
+ }
+ break;
+
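For clarity, a sketch of how callers are expected to reach the LUTI4 formats above (registers and index are hypothetical, not taken from the change; the new sopt parameter is the only discriminator between the single-register and vector-pair 16-bit forms):

    // Hypothetical call sites (sketch only):
    emitIns_R_R_R_I(INS_sve_luti4, EA_SCALABLE, REG_V0, REG_V1, REG_V2, 1,
                    INS_OPTS_SCALABLE_H);                // -> IF_SVE_GH_3B_B
    emitIns_R_R_R_I(INS_sve_luti4, EA_SCALABLE, REG_V0, REG_V1, REG_V2, 1,
                    INS_OPTS_SCALABLE_H, EA_UNKNOWN,
                    INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); // -> IF_SVE_GH_3B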
default:
unreached();
break;
@@ -13832,6 +14937,33 @@ void emitter::emitInsSve_R_R_R_R(instruction ins,
/* Figure out the encoding format of the instruction */
switch (ins)
{
+ case INS_sve_sel:
+ if (sopt == INS_SCALABLE_OPTS_UNPREDICATED)
+ {
+ if (reg1 == reg4)
+ {
+ // mov is a preferred alias for sel
+ return emitIns_R_R_R(INS_sve_mov, attr, reg1, reg2, reg3, opt, INS_SCALABLE_OPTS_PREDICATE_MERGE);
+ }
+
+ assert(insOptsScalableStandard(opt));
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isPredicateRegister(reg2)); // VVVV
+ assert(isVectorRegister(reg3)); // nnnnn
+ assert(isVectorRegister(reg4)); // mmmmm
+ fmt = IF_SVE_CW_4A;
+ }
+ else
+ {
+ assert(opt == INS_OPTS_SCALABLE_B);
+ assert(isPredicateRegister(reg1)); // dddd
+ assert(isPredicateRegister(reg2)); // gggg
+ assert(isPredicateRegister(reg3)); // nnnn
+ assert(isPredicateRegister(reg4)); // mmmm
+ fmt = IF_SVE_CZ_4A;
+ }
+ break;
+
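The reg1 == reg4 check above implements the preferred-alias rule: a SEL whose destination equals its second source operand degenerates to a predicated MOV. A sketch with hypothetical operands:

    // sel z0.s, p1, z2.s, z0.s   is re-emitted as   mov z0.s, p1/m, z2.s
    // sel z0.s, p1, z2.s, z3.s   encodes directly as IF_SVE_CW_4A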
case INS_sve_cmpeq:
case INS_sve_cmpgt:
case INS_sve_cmpge:
@@ -13867,7 +14999,6 @@ void emitter::emitInsSve_R_R_R_R(instruction ins,
case INS_sve_bic:
case INS_sve_orn:
case INS_sve_bics:
- case INS_sve_sel:
case INS_sve_eors:
case INS_sve_nor:
case INS_sve_nand:
@@ -13937,6 +15068,29 @@ void emitter::emitInsSve_R_R_R_R(instruction ins,
fmt = IF_SVE_AR_4A;
break;
+ case INS_sve_histcnt:
+ assert(insOptsScalableWords(opt));
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isLowPredicateRegister(reg2)); // ggg
+ assert(isVectorRegister(reg3)); // nnnnn
+ assert(isVectorRegister(reg4)); // mmmmm
+ assert(isValidVectorElemsize(optGetSveElemsize(opt))); // xx
+ fmt = IF_SVE_GI_4A;
+ break;
+
+ case INS_sve_fmla:
+ case INS_sve_fmls:
+ case INS_sve_fnmla:
+ case INS_sve_fnmls:
+ assert(insOptsScalableAtLeastHalf(opt));
+ assert(isVectorRegister(reg1)); // ddddd
+ assert(isLowPredicateRegister(reg2)); // ggg
+ assert(isVectorRegister(reg3)); // nnnnn
+ assert(isVectorRegister(reg4)); // mmmmm
+ assert(isValidVectorElemsize(optGetSveElemsize(opt))); // xx
+ fmt = IF_SVE_HU_4A;
+ break;
+
case INS_sve_mad:
case INS_sve_msb:
assert(insOptsScalableStandard(opt));
@@ -17013,7 +18167,7 @@ void emitter::emitIns_Call(EmitCallType callType,
{
assert(isLowPredicateRegister(reg));
emitter::code_t ureg = (emitter::code_t)reg - (emitter::code_t)REG_P0;
- assert((ureg >= 0) && (ureg <= 15));
+ assert((ureg >= 0) && (ureg <= 7));
return ureg << 10;
}
@@ -18292,6 +19446,94 @@ void emitter::emitIns_Call(EmitCallType callType,
/*****************************************************************************
*
+ * Returns the encoding for the field 'tszh:tszl:imm3' at bit locations '23-22:20-19:18-16'.
+ */
+
+/*static*/ emitter::code_t emitter::insEncodeSveElemsizeWithShift_tszh_tszl_imm3(const insOpts opt,
+ ssize_t imm,
+ bool isRightShift)
+{
+ code_t encoding = 0;
+
+ imm = insEncodeShiftImmediate(optGetSveElemsize(opt), isRightShift, imm);
+
+ switch (opt)
+ {
+ case INS_OPTS_SCALABLE_B:
+ imm = imm & 0b111; // bits 18-16
+ encoding |= (1 << 19); // bit 19
+ break;
+
+ case INS_OPTS_SCALABLE_H:
+ imm = imm & 0b1111; // bits 19-16
+ encoding |= (1 << 20); // bit 20
+ break;
+
+ case INS_OPTS_SCALABLE_S:
+ imm = imm & 0b11111; // bits 20-16
+ encoding |= (1 << 22); // bit 22
+ break;
+
+ case INS_OPTS_SCALABLE_D:
+ // place the top bit of 'imm' at bit 22
+ encoding |= ((imm >> 5) << 22);
+ imm = imm & 0b11111; // bits 20-16
+ encoding |= (1 << 23); // bit 23
+ break;
+
+ default:
+ assert(!"Invalid size for vector register");
+ break;
+ }
+
+ return (encoding | (code_t)(imm << 16));
+}
+
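A minimal standalone mirror of the INS_OPTS_SCALABLE_D branch above (assumption: imm is the 6-bit payload already produced by insEncodeShiftImmediate):

    #include <cstdint>

    // The payload's top bit lands at bit 22, its low five bits at bits 20-16,
    // and bit 23 is set as the D-size marker.
    static uint32_t packShiftImmD(uint32_t imm)
    {
        uint32_t encoding = ((imm >> 5) & 1u) << 22; // payload bit 5 -> bit 22
        encoding |= (1u << 23);                      // size marker   -> bit 23
        return encoding | ((imm & 0x1Fu) << 16);     // payload 4-0   -> bits 20-16
    }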
+/*****************************************************************************
+ *
+ * Returns the encoding for the field 'i1:tsz' at bit locations '20:19-16'.
+ */
+
+/*static*/ emitter::code_t emitter::insEncodeSveElemsizeWithImmediate_i1_tsz(const insOpts opt, ssize_t imm)
+{
+ code_t encoding = 0;
+
+ switch (opt)
+ {
+ case INS_OPTS_SCALABLE_B:
+ assert(isValidUimm4(imm));
+ encoding |= (1 << 16); // bit 16
+ encoding |= (imm << 17); // bits 20-17
+ break;
+
+ case INS_OPTS_SCALABLE_H:
+ assert(isValidUimm3(imm));
+ encoding |= (1 << 17); // bit 17
+ encoding |= (imm << 18); // bits 20-18
+ break;
+
+ case INS_OPTS_SCALABLE_S:
+ assert(isValidUimm2(imm));
+ encoding |= (1 << 18); // bit 18
+ encoding |= (imm << 19); // bits 20-19
+ break;
+
+ case INS_OPTS_SCALABLE_D:
+ assert(isValidImm1(imm));
+ encoding |= (1 << 19); // bit 19
+ encoding |= (imm << 20); // bit 20
+ break;
+
+ default:
+ assert(!"Invalid size for vector register");
+ break;
+ }
+
+ return encoding;
+}
+
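Worked example for the encoding above (values chosen for illustration): INS_OPTS_SCALABLE_S with imm == 2 produces (1 << 18) | (2 << 19) == 0x140000, i.e. the one-hot tsz bit marks the S element size and the two index bits sit immediately above it.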
+/*****************************************************************************
+ *
* Returns the encoding to select the elemsize for an Arm64 SVE vector instruction plus an immediate.
* This specifically encodes the field 'tszh:tszl' at bit locations '23-22:9-8'.
*/
@@ -18602,6 +19844,10 @@ void emitter::emitIns_Call(EmitCallType callType,
case IF_SVE_CZ_4A_A:
case IF_SVE_CZ_4A_L:
+ case IF_SVE_CE_2A:
+ case IF_SVE_CE_2B:
+ case IF_SVE_CE_2C:
+ case IF_SVE_CE_2D:
case IF_SVE_CF_2A:
case IF_SVE_CF_2B:
case IF_SVE_CF_2C:
@@ -20247,6 +21493,21 @@ void emitter::emitIns_Call(EmitCallType callType,
/*****************************************************************************
*
+ * Returns the encoding for the immediate value as 6-bits at bit locations '10-5'.
+ */
+
+/*static*/ emitter::code_t emitter::insEncodeSimm6_10_to_5(ssize_t imm)
+{
+ assert(isValidSimm6(imm));
+ if (imm < 0)
+ {
+ imm = (imm & 0x3F);
+ }
+ return (code_t)imm << 5;
+}
+
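Worked example: insEncodeSimm6_10_to_5(-3) masks the negative value to its 6-bit two's-complement form 0x3D, then shifts it to bits 10-5, yielding 0x3D << 5 == 0x7A0.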
+/*****************************************************************************
+ *
* Returns the encoding for the immediate value as 6-bits at bit locations '20-16'.
*/
@@ -20295,6 +21556,43 @@ void emitter::emitIns_Call(EmitCallType callType,
/*****************************************************************************
*
+ * Returns the encoding for the immediate value as 2-bits at bit locations '23-22'.
+ */
+
+/*static*/ emitter::code_t emitter::insEncodeUimm2_23_to_22(ssize_t imm)
+{
+ assert(isValidUimm2(imm));
+ return (code_t)imm << 22;
+}
+
+/*****************************************************************************
+ *
+ * Returns the encoding for the immediate value as 1 bit at bit location '23'.
+ */
+
+/*static*/ emitter::code_t emitter::insEncodeUimm1_23(ssize_t imm)
+{
+ assert(isValidImm1(imm));
+ return (code_t)imm << 23;
+}
+
+/*****************************************************************************
+ *
+ * Returns the encoding for the immediate value as 3 bits, with the high 2 bits at bit locations '23-22' and the low bit at location '12'.
+ */
+
+/*static*/ emitter::code_t emitter::insEncodeUimm3h3l_23_to_22_and_12(ssize_t imm)
+{
+ assert(isValidUimm3(imm));
+
+ code_t h = (code_t)(imm & 0x6) << 21; // encode the high 2 bits at locations '23-22'
+ code_t l = (code_t)(imm & 0x1) << 12; // encode the low bit at location '12'
+
+ return (h | l);
+}
+
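Worked example: for imm == 0b101, the high two bits 0b10 land at '23-22' ((0b101 & 0x6) << 21 == 0x800000) and the low bit at '12' (0x1000), so the function returns 0x801000.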
+/*****************************************************************************
+ *
* Returns the encoding for the immediate value as 1 bit at bit location '10'.
*/
@@ -20339,6 +21637,17 @@ void emitter::emitIns_Call(EmitCallType callType,
/*****************************************************************************
*
+ * Returns the encoding for the immediate value as 4-bits at bit locations '19-16'.
+ */
+
+/*static*/ emitter::code_t emitter::insEncodeUimm4_19_to_16(ssize_t imm)
+{
+ assert(isValidUimm4(imm));
+ return (code_t)imm << 16;
+}
+
+/*****************************************************************************
+ *
* Returns the encoding for the immediate value as 4-bits starting from 1, at bit locations '19-16'.
*/
@@ -22656,6 +23965,15 @@ BYTE* emitter::emitOutput_InstrSve(BYTE* dst, instrDesc* id)
dst += emitOutput_Instr(dst, code);
break;
+ case IF_SVE_AB_3B: // ................ ...gggmmmmmddddd -- SVE integer add/subtract vectors (predicated)
+ case IF_SVE_HL_3B: // ................ ...gggmmmmmddddd -- SVE floating-point arithmetic (predicated)
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
+ code |= insEncodeReg_P_12_to_10(id->idReg2()); // ggg
+ code |= insEncodeReg_V_9_to_5(id->idReg3()); // mmmmm
+ dst += emitOutput_Instr(dst, code);
+ break;
+
// Scalable with Merge or Zero predicate
case IF_SVE_AH_3A: // ........xx.....M ...gggnnnnnddddd -- SVE constructive prefix (predicated)
code = emitInsCodeSve(ins, fmt);
@@ -22684,6 +24002,8 @@ BYTE* emitter::emitOutput_InstrSve(BYTE* dst, instrDesc* id)
// Scalable, 4 regs. Reg4 in mmmmm.
case IF_SVE_AR_4A: // ........xx.mmmmm ...gggnnnnnddddd -- SVE integer multiply-accumulate writing addend
// (predicated)
+ case IF_SVE_GI_4A: // ........xx.mmmmm ...gggnnnnnddddd -- SVE2 histogram generation (vector)
+ case IF_SVE_HU_4A: // ........xx.mmmmm ...gggnnnnnddddd -- SVE floating-point multiply-accumulate writing addend
code = emitInsCodeSve(ins, fmt);
code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
code |= insEncodeReg_P_12_to_10(id->idReg2()); // ggg
@@ -22723,6 +24043,18 @@ BYTE* emitter::emitOutput_InstrSve(BYTE* dst, instrDesc* id)
case IF_SVE_EO_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 saturating multiply-add long
case IF_SVE_EV_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE integer clamp
case IF_SVE_EX_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE permute vector elements (quadwords)
+ case IF_SVE_FL_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer add/subtract long
+ case IF_SVE_FM_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer add/subtract wide
+ case IF_SVE_FN_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer multiply long
+ case IF_SVE_FP_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 bitwise exclusive-or interleaved
+ case IF_SVE_FQ_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 bitwise permute
+ case IF_SVE_FS_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer add/subtract interleaved long
+ case IF_SVE_FW_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer absolute difference and accumulate
+ case IF_SVE_FX_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer absolute difference and accumulate long
+ case IF_SVE_GC_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer add/subtract narrow high part
+ case IF_SVE_GF_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 histogram generation (segment)
+ case IF_SVE_GW_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE FP clamp
+ case IF_SVE_HK_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE floating-point arithmetic (unpredicated)
code = emitInsCodeSve(ins, fmt);
code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
code |= insEncodeReg_V_9_to_5(id->idReg2()); // nnnnn
@@ -22742,6 +24074,26 @@ BYTE* emitter::emitOutput_InstrSve(BYTE* dst, instrDesc* id)
dst += emitOutput_Instr(dst, code);
break;
+ case IF_SVE_BH_3A: // .........x.mmmmm ....hhnnnnnddddd -- SVE address generation
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
+ code |= insEncodeReg_V_9_to_5(id->idReg2()); // nnnnn
+ code |= insEncodeReg_V_20_to_16(id->idReg3()); // mmmmm
+ code |= insEncodeUimm2_11_to_10(emitGetInsSC(id)); // hh
+ code |= insEncodeImm1_22(id->idInsOpt() == INS_OPTS_SCALABLE_D ? 1 : 0);
+ dst += emitOutput_Instr(dst, code);
+ break;
+
+ case IF_SVE_BH_3B: // ...........mmmmm ....hhnnnnnddddd -- SVE address generation
+ case IF_SVE_BH_3B_A: // ...........mmmmm ....hhnnnnnddddd -- SVE address generation
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
+ code |= insEncodeReg_V_9_to_5(id->idReg2()); // nnnnn
+ code |= insEncodeReg_V_20_to_16(id->idReg3()); // mmmmm
+ code |= insEncodeUimm2_11_to_10(emitGetInsSC(id)); // hh
+ dst += emitOutput_Instr(dst, code);
+ break;
+
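The three address-generation formats above correspond to these assembly shapes (operands illustrative), matching the display templates added further down:

    adr z0.d, [z1.d, z2.d, lsl #2]   // IF_SVE_BH_3A, 'hh' is the shift amount
    adr z0.d, [z1.d, z2.d, sxtw #1]  // IF_SVE_BH_3B, sign-extended offsets
    adr z0.d, [z1.d, z2.d, uxtw]     // IF_SVE_BH_3B_A, zero-extended offsets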
// Immediate and pattern to general purpose.
case IF_SVE_BL_1A: // ............iiii ......pppppddddd -- SVE element count
imm = emitGetInsSC(id);
@@ -22752,6 +24104,68 @@ BYTE* emitter::emitOutput_InstrSve(BYTE* dst, instrDesc* id)
dst += emitOutput_Instr(dst, code);
break;
+ case IF_SVE_CE_2A: // ................ ......nnnnn.DDDD -- SVE move predicate from vector
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_P_3_to_0(id->idReg1()); // DDDD
+ code |= insEncodeReg_V_9_to_5(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
+
+ case IF_SVE_CE_2B: // .........i...ii. ......nnnnn.DDDD -- SVE move predicate from vector
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_P_3_to_0(id->idReg1()); // DDDD
+ code |= insEncodeReg_V_9_to_5(id->idReg2()); // nnnnn
+ code |= insEncodeSplitUimm<22, 22, 18, 17>(emitGetInsSC(id)); // i...ii
+ dst += emitOutput_Instr(dst, code);
+ break;
+
+ case IF_SVE_CE_2C: // ..............i. ......nnnnn.DDDD -- SVE move predicate from vector
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_P_3_to_0(id->idReg1()); // DDDD
+ code |= insEncodeReg_V_9_to_5(id->idReg2()); // nnnnn
+ code |= insEncodeUimm<17, 17>(emitGetInsSC(id)); // i
+ dst += emitOutput_Instr(dst, code);
+ break;
+
+ case IF_SVE_CE_2D: // .............ii. ......nnnnn.DDDD -- SVE move predicate from vector
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_P_3_to_0(id->idReg1()); // DDDD
+ code |= insEncodeReg_V_9_to_5(id->idReg2()); // nnnnn
+ code |= insEncodeUimm<18, 17>(emitGetInsSC(id)); // ii
+ dst += emitOutput_Instr(dst, code);
+ break;
+
+ case IF_SVE_CF_2A: // ................ .......NNNNddddd -- SVE move predicate into vector
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
+ code |= insEncodeReg_P_8_to_5(id->idReg2()); // NNNN
+ dst += emitOutput_Instr(dst, code);
+ break;
+
+ case IF_SVE_CF_2B: // .........i...ii. .......NNNNddddd -- SVE move predicate into vector
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
+ code |= insEncodeReg_P_8_to_5(id->idReg2()); // NNNN
+ code |= insEncodeSplitUimm<22, 22, 18, 17>(emitGetInsSC(id)); // i...ii
+ dst += emitOutput_Instr(dst, code);
+ break;
+
+ case IF_SVE_CF_2C: // ..............i. .......NNNNddddd -- SVE move predicate into vector
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
+ code |= insEncodeReg_P_8_to_5(id->idReg2()); // NNNN
+ code |= insEncodeUimm<17, 17>(emitGetInsSC(id)); // i
+ dst += emitOutput_Instr(dst, code);
+ break;
+
+ case IF_SVE_CF_2D: // .............ii. .......NNNNddddd -- SVE move predicate into vector
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
+ code |= insEncodeReg_P_8_to_5(id->idReg2()); // NNNN
+ code |= insEncodeUimm<18, 17>(emitGetInsSC(id)); // ii
+ dst += emitOutput_Instr(dst, code);
+ break;
+
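The IF_SVE_CE/IF_SVE_CF groups above encode the SVE2.1 PMOV forms (operands illustrative):

    pmov p0.b, z1       // IF_SVE_CE_2A
    pmov p0.d, z1[5]    // IF_SVE_CE_2B
    pmov z0, p1.b       // IF_SVE_CF_2A
    pmov z0[5], p1.d    // IF_SVE_CF_2B

The index width tracks the element size: no index for B, one bit for H, two bits for S, and three bits (split across the encoding) for D.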
case IF_SVE_CI_3A: // ........xx..MMMM .......NNNN.DDDD -- SVE permute predicate elements
code = emitInsCodeSve(ins, fmt);
code |= insEncodeReg_P_3_to_0(id->idReg1()); // DDDD
@@ -22833,6 +24247,19 @@ BYTE* emitter::emitOutput_InstrSve(BYTE* dst, instrDesc* id)
dst += emitOutput_Instr(dst, code);
break;
+ case IF_SVE_CW_4A: // ........xx.mmmmm ..VVVVnnnnnddddd -- SVE select vector elements (predicated)
+ {
+ regNumber reg4 = (ins == INS_sve_mov ? id->idReg1() : id->idReg4());
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
+ code |= insEncodeReg_P_13_to_10(id->idReg2()); // VVVV
+ code |= insEncodeReg_V_9_to_5(id->idReg3()); // nnnnn
+ code |= insEncodeReg_V_20_to_16(reg4); // mmmmm
+ code |= insEncodeElemsize(optGetSveElemsize(id->idInsOpt())); // xx
+ dst += emitOutput_Instr(dst, code);
+ break;
+ }
+
case IF_SVE_CX_4A: // ........xx.mmmmm ...gggnnnnn.DDDD -- SVE integer compare vectors
case IF_SVE_CX_4A_A: // ........xx.mmmmm ...gggnnnnn.DDDD -- SVE integer compare vectors
case IF_SVE_GE_4A: // ........xx.mmmmm ...gggnnnnn.DDDD -- SVE2 character match
@@ -22868,8 +24295,26 @@ BYTE* emitter::emitOutput_InstrSve(BYTE* dst, instrDesc* id)
dst += emitOutput_Instr(dst, code);
break;
- case IF_SVE_EW_3A: // ...........mmmmm ......nnnnnddddd -- SVE2 multiply-add (checked pointer)
- case IF_SVE_BR_3B: // ...........mmmmm ......nnnnnddddd -- SVE permute vector segments
+ case IF_SVE_EW_3A: // ...........mmmmm ......nnnnnddddd -- SVE2 multiply-add (checked pointer)
+ case IF_SVE_BR_3B: // ...........mmmmm ......nnnnnddddd -- SVE permute vector segments
+ case IF_SVE_FN_3B: // ...........mmmmm ......nnnnnddddd -- SVE2 integer multiply long
+ case IF_SVE_FO_3A: // ...........mmmmm ......nnnnnddddd -- SVE integer matrix multiply accumulate
+ case IF_SVE_AT_3B: // ...........mmmmm ......nnnnnddddd -- SVE integer add/subtract vectors (unpredicated)
+ case IF_SVE_AU_3A: // ...........mmmmm ......nnnnnddddd -- SVE bitwise logical operations (unpredicated)
+ case IF_SVE_BD_3B: // ...........mmmmm ......nnnnnddddd -- SVE2 integer multiply vectors (unpredicated)
+ case IF_SVE_EF_3A: // ...........mmmmm ......nnnnnddddd -- SVE two-way dot product
+ case IF_SVE_EI_3A: // ...........mmmmm ......nnnnnddddd -- SVE mixed sign dot product
+ case IF_SVE_GJ_3A: // ...........mmmmm ......nnnnnddddd -- SVE2 crypto constructive binary operations
+ case IF_SVE_GN_3A: // ...........mmmmm ......nnnnnddddd -- SVE2 FP8 multiply-add long
+ case IF_SVE_GO_3A: // ...........mmmmm ......nnnnnddddd -- SVE2 FP8 multiply-add long long
+ case IF_SVE_GW_3B: // ...........mmmmm ......nnnnnddddd -- SVE FP clamp
+ case IF_SVE_HA_3A: // ...........mmmmm ......nnnnnddddd -- SVE BFloat16 floating-point dot product
+ case IF_SVE_HA_3A_E: // ...........mmmmm ......nnnnnddddd -- SVE BFloat16 floating-point dot product
+ case IF_SVE_HA_3A_F: // ...........mmmmm ......nnnnnddddd -- SVE BFloat16 floating-point dot product
+ case IF_SVE_HB_3A: // ...........mmmmm ......nnnnnddddd -- SVE floating-point multiply-add long
+ case IF_SVE_HD_3A: // ...........mmmmm ......nnnnnddddd -- SVE floating point matrix multiply accumulate
+ case IF_SVE_HD_3A_A: // ...........mmmmm ......nnnnnddddd -- SVE floating point matrix multiply accumulate
+ case IF_SVE_HK_3B: // ...........mmmmm ......nnnnnddddd -- SVE floating-point arithmetic (unpredicated)
code = emitInsCodeSve(ins, fmt);
code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
code |= insEncodeReg_V_9_to_5(id->idReg2()); // nnnnn
@@ -22877,6 +24322,29 @@ BYTE* emitter::emitOutput_InstrSve(BYTE* dst, instrDesc* id)
dst += emitOutput_Instr(dst, code);
break;
+ case IF_SVE_AV_3A: // ...........mmmmm ......kkkkkddddd -- SVE2 bitwise ternary operations
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
+ code |= insEncodeReg_V_20_to_16(id->idReg2()); // mmmmm
+ code |= insEncodeReg_V_9_to_5(id->idReg3()); // kkkkk
+ dst += emitOutput_Instr(dst, code);
+ break;
+
+ case IF_SVE_BB_2A: // ...........nnnnn .....iiiiiiddddd -- SVE stack frame adjustment
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_R_4_to_0(id->idReg1()); // ddddd
+ code |= insEncodeSimm6_10_to_5(emitGetInsSC(id)); // iiiiii
+ code |= insEncodeReg_R_20_to_16(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
+
+ case IF_SVE_BC_1A: // ................ .....iiiiiiddddd -- SVE stack frame size
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_R_4_to_0(id->idReg1()); // ddddd
+ code |= insEncodeSimm6_10_to_5(emitGetInsSC(id)); // iiiiii
+ dst += emitOutput_Instr(dst, code);
+ break;
+
case IF_SVE_EW_3B: // ...........mmmmm ......aaaaaddddd -- SVE2 multiply-add (checked pointer)
code = emitInsCodeSve(ins, fmt);
code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
@@ -23811,6 +25279,44 @@ BYTE* emitter::emitOutput_InstrSve(BYTE* dst, instrDesc* id)
dst += emitOutput_Instr(dst, code);
break;
+ case IF_SVE_GG_3A: // ........ii.mmmmm ......nnnnnddddd -- SVE2 lookup table with 2-bit indices and 16-bit
+ // element size
+ case IF_SVE_GH_3B: // ........ii.mmmmm ......nnnnnddddd -- SVE2 lookup table with 4-bit indices and 16-bit
+ // element size
+ case IF_SVE_GH_3B_B: // ........ii.mmmmm ......nnnnnddddd -- SVE2 lookup table with 4-bit indices and 16-bit
+ // element size
+ imm = emitGetInsSC(id);
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
+ code |= insEncodeReg_V_9_to_5(id->idReg2()); // nnnnn
+ code |= insEncodeReg_V_20_to_16(id->idReg3()); // mmmmm
+ code |= insEncodeUimm2_23_to_22(imm); // ii
+ dst += emitOutput_Instr(dst, code);
+ break;
+
+ case IF_SVE_GG_3B: // ........ii.mmmmm ...i..nnnnnddddd -- SVE2 lookup table with 2-bit indices and 16-bit
+ // element size
+ imm = emitGetInsSC(id);
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
+ code |= insEncodeReg_V_9_to_5(id->idReg2()); // nnnnn
+ code |= insEncodeReg_V_20_to_16(id->idReg3()); // mmmmm
+ code |= insEncodeUimm3h3l_23_to_22_and_12(imm); // ii
+ // i
+ dst += emitOutput_Instr(dst, code);
+ break;
+
+ case IF_SVE_GH_3A: // ........i..mmmmm ......nnnnnddddd -- SVE2 lookup table with 4-bit indices and 16-bit
+ // element size
+ imm = emitGetInsSC(id);
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
+ code |= insEncodeReg_V_9_to_5(id->idReg2()); // nnnnn
+ code |= insEncodeReg_V_20_to_16(id->idReg3()); // mmmmm
+ code |= insEncodeUimm1_23(imm); // i
+ dst += emitOutput_Instr(dst, code);
+ break;
+
case IF_SVE_HY_3A: // .........h.mmmmm ...gggnnnnn.oooo -- SVE 32-bit gather prefetch (scalar plus 32-bit scaled
// offsets)
case IF_SVE_HY_3A_A: // .........h.mmmmm ...gggnnnnn.oooo -- SVE 32-bit gather prefetch (scalar plus 32-bit
@@ -24026,6 +25532,63 @@ BYTE* emitter::emitOutput_InstrSve(BYTE* dst, instrDesc* id)
dst += emitOutput_Instr(dst, code);
break;
+ case IF_SVE_BI_2A: // ................ ......nnnnnddddd -- SVE constructive prefix (unpredicated)
+ case IF_SVE_HH_2A: // ................ ......nnnnnddddd -- SVE2 FP8 upconverts
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
+ code |= insEncodeReg_V_9_to_5(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
+
+ case IF_SVE_CB_2A: // ........xx...... ......nnnnnddddd -- SVE broadcast general register
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
+ code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
+ code |= insEncodeSveElemsize(optGetSveElemsize(id->idInsOpt())); // xx
+ dst += emitOutput_Instr(dst, code);
+ break;
+
+ case IF_SVE_BJ_2A: // ........xx...... ......nnnnnddddd -- SVE floating-point exponential accelerator
+ case IF_SVE_CG_2A: // ........xx...... ......nnnnnddddd -- SVE reverse vector elements
+ case IF_SVE_CH_2A: // ........xx...... ......nnnnnddddd -- SVE unpack vector elements
+ case IF_SVE_HF_2A: // ........xx...... ......nnnnnddddd -- SVE floating-point reciprocal estimate (unpredicated)
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
+ code |= insEncodeReg_V_9_to_5(id->idReg2()); // nnnnn
+ code |= insEncodeSveElemsize(optGetSveElemsize(id->idInsOpt())); // xx
+ dst += emitOutput_Instr(dst, code);
+ break;
+
+ case IF_SVE_BF_2A: // ........xx.xxiii ......nnnnnddddd -- SVE bitwise shift by immediate (unpredicated)
+ case IF_SVE_FT_2A: // ........xx.xxiii ......nnnnnddddd -- SVE2 bitwise shift and insert
+ case IF_SVE_FU_2A: // ........xx.xxiii ......nnnnnddddd -- SVE2 bitwise shift right and accumulate
+ imm = emitGetInsSC(id);
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
+ code |= insEncodeReg_V_9_to_5(id->idReg2()); // nnnnn
+ code |= insEncodeSveElemsizeWithShift_tszh_tszl_imm3(id->idInsOpt(), imm,
+ emitInsIsVectorRightShift(ins)); // xx xxiii
+ dst += emitOutput_Instr(dst, code);
+ break;
+
+ case IF_SVE_BX_2A: // ...........ixxxx ......nnnnnddddd -- sve_int_perm_dupq_i
+ imm = emitGetInsSC(id);
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
+ code |= insEncodeReg_V_9_to_5(id->idReg2()); // nnnnn
+ code |= insEncodeSveElemsizeWithImmediate_i1_tsz(id->idInsOpt(), imm); // ixxxx
+ dst += emitOutput_Instr(dst, code);
+ break;
+
+ case IF_SVE_BY_2A: // ............iiii ......mmmmmddddd -- sve_int_perm_extq
+ imm = emitGetInsSC(id);
+ code = emitInsCodeSve(ins, fmt);
+ code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd
+ code |= insEncodeReg_V_9_to_5(id->idReg2()); // mmmmm
+ code |= insEncodeUimm4_19_to_16(imm); // iiii
+ dst += emitOutput_Instr(dst, code);
+ break;
+
default:
assert(!"Unexpected format");
break;
@@ -24305,11 +25868,17 @@ void emitter::emitDispSveExtendOpts(insOpts opt)
{
switch (opt)
{
+ case INS_OPTS_LSL:
+ printf("lsl");
+ break;
+
+ case INS_OPTS_UXTW:
case INS_OPTS_SCALABLE_S_UXTW:
case INS_OPTS_SCALABLE_D_UXTW:
printf("uxtw");
break;
+ case INS_OPTS_SXTW:
case INS_OPTS_SCALABLE_S_SXTW:
case INS_OPTS_SCALABLE_D_SXTW:
printf("sxtw");
@@ -24326,27 +25895,18 @@ void emitter::emitDispSveExtendOpts(insOpts opt)
 * Prints the Extend Type encoding along with the N value
*/
-void emitter::emitDispSveExtendOptsModN(insOpts opt, int n)
+void emitter::emitDispSveExtendOptsModN(insOpts opt, ssize_t imm)
{
- assert(n >= 0 && n <= 3);
+ assert(imm >= 0 && imm <= 3);
- emitDispSveExtendOpts(opt);
- switch (n)
+ if (imm == 0 && opt != INS_OPTS_LSL)
{
- case 3:
- printf(" #3");
- break;
-
- case 2:
- printf(" #2");
- break;
-
- case 1:
- printf(" #1");
- break;
-
- default:
- break;
+ emitDispSveExtendOpts(opt);
+ }
+ else if (imm > 0)
+ {
+ emitDispSveExtendOpts(opt);
+ printf(" #%d", (int)imm);
}
}
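The rewritten helper derives the display from the immediate alone:

    // emitDispSveExtendOptsModN(INS_OPTS_LSL, 0)  -> (nothing printed)
    // emitDispSveExtendOptsModN(INS_OPTS_LSL, 2)  -> "lsl #2"
    // emitDispSveExtendOptsModN(INS_OPTS_SXTW, 0) -> "sxtw"

This matches the {, <mod> <amount>} operand shapes used by the address-generation display cases below, where a zero shift amount is omitted.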
@@ -24517,20 +26077,46 @@ void emitter::emitDispReg(regNumber reg, emitAttr attr, bool addComma)
}
//------------------------------------------------------------------------
+// emitDispSveReg: Display a scalable vector register name
+//
+void emitter::emitDispSveReg(regNumber reg, bool addComma)
+{
+ assert(isVectorRegister(reg));
+ printf(emitSveRegName(reg));
+
+ if (addComma)
+ emitDispComma();
+}
+
+//------------------------------------------------------------------------
// emitDispSveReg: Display a scalable vector register name with an arrangement suffix
//
void emitter::emitDispSveReg(regNumber reg, insOpts opt, bool addComma)
{
- assert(insOptsScalable(opt) || insOptsScalable32bitExtends(opt));
assert(isVectorRegister(reg));
printf(emitSveRegName(reg));
- emitDispArrangement(opt);
+
+ if (opt != INS_OPTS_NONE)
+ {
+ assert(insOptsScalable(opt) || insOptsScalable32bitExtends(opt));
+ emitDispArrangement(opt);
+ }
if (addComma)
emitDispComma();
}
//------------------------------------------------------------------------
+// emitDispSveRegIndex: Display a scalable vector register with indexed element
+//
+void emitter::emitDispSveRegIndex(regNumber reg, ssize_t index, bool addComma)
+{
+ assert(isVectorRegister(reg));
+ printf(emitSveRegName(reg));
+ emitDispElementIndex(index, addComma);
+}
+
+//------------------------------------------------------------------------
// emitDispVectorReg: Display a SIMD vector register name with an arrangement suffix
//
void emitter::emitDispVectorReg(regNumber reg, insOpts opt, bool addComma)
@@ -26453,6 +28039,10 @@ void emitter::emitDispInsHelp(
// (predicated)
case IF_SVE_GR_3A: // ........xx...... ...gggmmmmmddddd -- SVE2 floating-point pairwise operations
case IF_SVE_HL_3A: // ........xx...... ...gggmmmmmddddd -- SVE floating-point arithmetic (predicated)
+ // <Zdn>.D, <Pg>/M, <Zdn>.D, <Zm>.D
+ case IF_SVE_AB_3B: // ................ ...gggmmmmmddddd -- SVE integer add/subtract vectors (predicated)
+ // <Zdn>.H, <Pg>/M, <Zdn>.H, <Zm>.H
+ case IF_SVE_HL_3B: // ................ ...gggmmmmmddddd -- SVE floating-point arithmetic (predicated)
emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd
emitDispLowPredicateReg(id->idReg2(), insGetPredicateType(fmt), id->idInsOpt(), true); // ggg
emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd
@@ -26491,6 +28081,9 @@ void emitter::emitDispInsHelp(
// (predicated)
case IF_SVE_AS_4A: // ........xx.mmmmm ...gggaaaaaddddd -- SVE integer multiply-add writing multiplicand
// (predicated)
+ case IF_SVE_HU_4A: // ........xx.mmmmm ...gggnnnnnddddd -- SVE floating-point multiply-accumulate writing addend
+ // <Zd>.<T>, <Pg>/Z, <Zn>.<T>, <Zm>.<T>
+ case IF_SVE_GI_4A: // ........xx.mmmmm ...gggnnnnnddddd -- SVE2 histogram generation (vector)
emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd
emitDispLowPredicateReg(id->idReg2(), insGetPredicateType(fmt), id->idInsOpt(), true); // ggg
emitDispSveReg(id->idReg3(), id->idInsOpt(), true);
@@ -26502,21 +28095,61 @@ void emitter::emitDispInsHelp(
case IF_SVE_BD_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer multiply vectors (unpredicated)
case IF_SVE_BE_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 signed saturating doubling multiply high
// (unpredicated)
+ case IF_SVE_FP_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 bitwise exclusive-or interleaved
+ case IF_SVE_FQ_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 bitwise permute
case IF_SVE_BK_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE floating-point trig select coefficient
case IF_SVE_BR_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE permute vector segments
case IF_SVE_CA_3A: // ........xx.mmmmm ......nnnnnddddd -- sve_int_perm_tbxquads
case IF_SVE_EV_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE integer clamp
+ case IF_SVE_GW_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE FP clamp
+ case IF_SVE_HK_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE floating-point arithmetic (unpredicated)
// <Zda>.<T>, <Zn>.<T>, <Zm>.<T>
case IF_SVE_EM_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 saturating multiply-add high
+ case IF_SVE_FW_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer absolute difference and accumulate
// <Zd>.Q, <Zn>.Q, <Zm>.Q
case IF_SVE_BR_3B: // ...........mmmmm ......nnnnnddddd -- SVE permute vector segments
// <Zda>.D, <Zn>.D, <Zm>.D
+ case IF_SVE_HD_3A_A: // ...........mmmmm ......nnnnnddddd -- SVE floating point matrix multiply accumulate
+ // <Zd>.D, <Zn>.D, <Zm>.D
+ case IF_SVE_AT_3B: // ...........mmmmm ......nnnnnddddd -- SVE integer add/subtract vectors (unpredicated)
+ case IF_SVE_AU_3A: // ...........mmmmm ......nnnnnddddd -- SVE bitwise logical operations (unpredicated)
+ // <Zd>.B, <Zn>.B, <Zm>.B
+ case IF_SVE_GF_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 histogram generation (segment)
+ case IF_SVE_BD_3B: // ...........mmmmm ......nnnnnddddd -- SVE2 integer multiply vectors (unpredicated)
+ // <Zd>.D, <Zn>.D, <Zm>.D
+ // <Zd>.S, <Zn>.S, <Zm>.S
+ case IF_SVE_GJ_3A: // ...........mmmmm ......nnnnnddddd -- SVE2 crypto constructive binary operations
+ // <Zd>.H, <Zn>.H, <Zm>.H
+ case IF_SVE_GW_3B: // ...........mmmmm ......nnnnnddddd -- SVE FP clamp
+ case IF_SVE_HK_3B: // ...........mmmmm ......nnnnnddddd -- SVE floating-point arithmetic (unpredicated)
+ emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd
+ emitDispSveReg(id->idReg2(), id->idInsOpt(), true); // nnnnn/mmmmm
+ emitDispSveReg(id->idReg3(), id->idInsOpt(), false); // mmmmm/aaaaa
+ break;
+
+ // <Zda>.D, <Zn>.D, <Zm>.D
case IF_SVE_EW_3A: // ...........mmmmm ......nnnnnddddd -- SVE2 multiply-add (checked pointer)
// <Zdn>.D, <Zm>.D, <Za>.D
case IF_SVE_EW_3B: // ...........mmmmm ......aaaaaddddd -- SVE2 multiply-add (checked pointer)
+ emitDispSveReg(id->idReg1(), INS_OPTS_SCALABLE_D, true); // ddddd
+ emitDispSveReg(id->idReg2(), INS_OPTS_SCALABLE_D, true); // nnnnn/mmmmm
+ emitDispSveReg(id->idReg3(), INS_OPTS_SCALABLE_D, false); // mmmmm/aaaaa
+ break;
+
+ // <Zdn>.D, <Zdn>.D, <Zm>.D, <Zk>.D
+ case IF_SVE_AV_3A: // ...........mmmmm ......kkkkkddddd -- SVE2 bitwise ternary operations
emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd
- emitDispSveReg(id->idReg2(), id->idInsOpt(), true); // nnnnn/mmmmm
- emitDispSveReg(id->idReg3(), id->idInsOpt(), false); // mmmmm/aaaaa
+ emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd
+ emitDispSveReg(id->idReg2(), id->idInsOpt(), true); // mmmmm
+ emitDispSveReg(id->idReg3(), id->idInsOpt(), false); // kkkkk
+ break;
+
+ // <Zda>.H, <Zn>.B, <Zm>.B
+ case IF_SVE_GN_3A: // ...........mmmmm ......nnnnnddddd -- SVE2 FP8 multiply-add long
+ case IF_SVE_HA_3A_E: // ...........mmmmm ......nnnnnddddd -- SVE BFloat16 floating-point dot product
+ emitDispSveReg(id->idReg1(), INS_OPTS_SCALABLE_H, true); // ddddd
+ emitDispSveReg(id->idReg2(), id->idInsOpt(), true); // nnnnn
+ emitDispSveReg(id->idReg3(), id->idInsOpt(), false); // mmmmm
break;
// <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
@@ -26583,6 +28216,36 @@ void emitter::emitDispInsHelp(
emitDispSveReg(id->idReg3(), INS_OPTS_SCALABLE_D, false); // mmmmm
break;
+ // <Zd>.<T>, [<Zn>.<T>, <Zm>.<T>{, <mod> <amount>}]
+ case IF_SVE_BH_3A: // .........x.mmmmm ....hhnnnnnddddd -- SVE address generation
+ emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd
+ printf("[");
+ emitDispSveReg(id->idReg2(), id->idInsOpt(), true);
+ emitDispSveReg(id->idReg3(), id->idInsOpt(), emitGetInsSC(id) > 0);
+ emitDispSveExtendOptsModN(INS_OPTS_LSL, emitGetInsSC(id));
+ printf("]");
+ break;
+
+ // <Zd>.D, [<Zn>.D, <Zm>.D, SXTW{ <amount>}]
+ case IF_SVE_BH_3B: // ...........mmmmm ....hhnnnnnddddd -- SVE address generation
+ emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd
+ printf("[");
+ emitDispSveReg(id->idReg2(), id->idInsOpt(), true);
+ emitDispSveReg(id->idReg3(), id->idInsOpt(), true);
+ emitDispSveExtendOptsModN(INS_OPTS_SXTW, emitGetInsSC(id));
+ printf("]");
+ break;
+
+ // <Zd>.D, [<Zn>.D, <Zm>.D, UXTW{ <amount>}]
+ case IF_SVE_BH_3B_A: // ...........mmmmm ....hhnnnnnddddd -- SVE address generation
+ emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd
+ printf("[");
+ emitDispSveReg(id->idReg2(), id->idInsOpt(), true);
+ emitDispSveReg(id->idReg3(), id->idInsOpt(), true);
+ emitDispSveExtendOptsModN(INS_OPTS_UXTW, emitGetInsSC(id));
+ printf("]");
+ break;
+
// <Pd>.H, <Pn>.B
case IF_SVE_CK_2A: // ................ .......NNNN.DDDD -- SVE unpack predicate elements
emitDispPredicateReg(id->idReg1(), insGetPredicateType(fmt), INS_OPTS_SCALABLE_H, true); // DDDD
@@ -26649,6 +28312,39 @@ void emitter::emitDispInsHelp(
emitDispSveReg(id->idReg3(), id->idInsOpt(), false); // mmmmm
break;
+ case IF_SVE_CE_2A: // ................ ......nnnnn.DDDD -- SVE move predicate from vector
+ emitDispPredicateReg(id->idReg1(), insGetPredicateType(fmt), INS_OPTS_SCALABLE_B, true); // DDDD
+ emitDispSveReg(id->idReg2(), false); // nnnnn
+ break;
+ case IF_SVE_CE_2B: // .........i...ii. ......nnnnn.DDDD -- SVE move predicate from vector
+ emitDispPredicateReg(id->idReg1(), insGetPredicateType(fmt), INS_OPTS_SCALABLE_D, true); // DDDD
+ emitDispSveRegIndex(id->idReg2(), emitGetInsSC(id), false); // nnnnn
+ break;
+ case IF_SVE_CE_2C: // ..............i. ......nnnnn.DDDD -- SVE move predicate from vector
+ emitDispPredicateReg(id->idReg1(), insGetPredicateType(fmt), INS_OPTS_SCALABLE_H, true); // DDDD
+ emitDispSveRegIndex(id->idReg2(), emitGetInsSC(id), false); // nnnnn
+ break;
+ case IF_SVE_CE_2D: // .............ii. ......nnnnn.DDDD -- SVE move predicate from vector
+ emitDispPredicateReg(id->idReg1(), insGetPredicateType(fmt), INS_OPTS_SCALABLE_S, true); // DDDD
+ emitDispSveRegIndex(id->idReg2(), emitGetInsSC(id), false); // nnnnn
+ break;
+ case IF_SVE_CF_2A: // ................ .......NNNNddddd -- SVE move predicate into vector
+ emitDispSveReg(id->idReg1(), true); // ddddd
+ emitDispPredicateReg(id->idReg2(), insGetPredicateType(fmt), INS_OPTS_SCALABLE_B, false); // NNNN
+ break;
+ case IF_SVE_CF_2B: // .........i...ii. .......NNNNddddd -- SVE move predicate into vector
+ emitDispSveRegIndex(id->idReg1(), emitGetInsSC(id), true); // ddddd
+ emitDispPredicateReg(id->idReg2(), insGetPredicateType(fmt), INS_OPTS_SCALABLE_D, false); // NNNN
+ break;
+ case IF_SVE_CF_2C: // ..............i. .......NNNNddddd -- SVE move predicate into vector
+ emitDispSveRegIndex(id->idReg1(), emitGetInsSC(id), true); // ddddd
+ emitDispPredicateReg(id->idReg2(), insGetPredicateType(fmt), INS_OPTS_SCALABLE_H, false); // NNNN
+ break;
+ case IF_SVE_CF_2D: // .............ii. .......NNNNddddd -- SVE move predicate into vector
+ emitDispSveRegIndex(id->idReg1(), emitGetInsSC(id), true); // ddddd
+ emitDispPredicateReg(id->idReg2(), insGetPredicateType(fmt), INS_OPTS_SCALABLE_S, false); // NNNN
+ break;
+
// <Pd>.<T>, <Pn>.<T>, <Pm>.<T>
case IF_SVE_CI_3A: // ........xx..MMMM .......NNNN.DDDD -- SVE permute predicate elements
emitDispPredicateReg(id->idReg1(), insGetPredicateType(fmt, 1), id->idInsOpt(), true); // DDDD
@@ -26700,6 +28396,25 @@ void emitter::emitDispInsHelp(
emitDispSveReg(id->idReg3(), id->idInsOpt(), false); // mmmmm
break;
+ // MOV <Zd>.<T>, <Pv>/M, <Zn>.<T> or SEL <Zd>.<T>, <Pv>, <Zn>.<T>, <Zm>.<T>
+ case IF_SVE_CW_4A: // ........xx.mmmmm ..VVVVnnnnnddddd -- SVE select vector elements (predicated)
+ {
+ emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd
+
+ if (id->idIns() == INS_sve_mov)
+ {
+ emitDispPredicateReg(id->idReg2(), PREDICATE_MERGE, id->idInsOpt(), true); // VVVV
+ emitDispSveReg(id->idReg3(), id->idInsOpt(), false); // nnnnn
+ }
+ else
+ {
+ emitDispPredicateReg(id->idReg2(), PREDICATE_NONE, id->idInsOpt(), true); // VVVV
+ emitDispSveReg(id->idReg3(), id->idInsOpt(), true); // nnnnn
+ emitDispSveReg(id->idReg4(), id->idInsOpt(), false); // mmmmm
+ }
+ break;
+ }
+
// <Pd>.<T>, <Pg>/Z, <Zn>.<T>, <Zm>.<T>
case IF_SVE_CX_4A: // ........xx.mmmmm ...gggnnnnn.DDDD -- SVE integer compare vectors
case IF_SVE_GE_4A: // ........xx.mmmmm ...gggnnnnn.DDDD -- SVE2 character match
@@ -26751,6 +28466,25 @@ void emitter::emitDispInsHelp(
emitDispElementIndex(emitGetInsSC(id), false); // ii/iii
break;
+ // <Zda>.S, <Zn>.H, <Zm>.H
+ case IF_SVE_EF_3A: // ...........mmmmm ......nnnnnddddd -- SVE two-way dot product
+ case IF_SVE_HA_3A: // ...........mmmmm ......nnnnnddddd -- SVE BFloat16 floating-point dot product
+ case IF_SVE_HB_3A: // ...........mmmmm ......nnnnnddddd -- SVE floating-point multiply-add long
+ case IF_SVE_HD_3A: // ...........mmmmm ......nnnnnddddd -- SVE floating point matrix multiply accumulate
+ case IF_SVE_EI_3A: // ...........mmmmm ......nnnnnddddd -- SVE mixed sign dot product
+ case IF_SVE_GO_3A: // ...........mmmmm ......nnnnnddddd -- SVE2 FP8 multiply-add long long
+ emitDispSveReg(id->idReg1(), INS_OPTS_SCALABLE_S, true); // ddddd
+ emitDispSveReg(id->idReg2(), id->idInsOpt(), true); // nnnnn
+ emitDispSveReg(id->idReg3(), id->idInsOpt(), false); // mmmmm
+ break;
+
+ // <Zda>.S, <Zn>.B, <Zm>.B
+ case IF_SVE_HA_3A_F: // ...........mmmmm ......nnnnnddddd -- SVE BFloat16 floating-point dot product
+ emitDispSveReg(id->idReg1(), INS_OPTS_SCALABLE_S, true); // ddddd
+ emitDispSveReg(id->idReg2(), INS_OPTS_SCALABLE_B, true); // nnnnn
+ emitDispSveReg(id->idReg3(), INS_OPTS_SCALABLE_B, false); // mmmmm
+ break;
+
// <Zd>.D, <Zn>.S, <Zm>.S[<imm>]
case IF_SVE_FE_3B: // ...........immmm ....i.nnnnnddddd -- SVE2 integer multiply long (indexed)
case IF_SVE_FH_3B: // ...........immmm ....i.nnnnnddddd -- SVE2 saturating multiply (indexed)
@@ -27016,6 +28750,23 @@ void emitter::emitDispInsHelp(
emitDispSveReg(id->idReg2(), optWidenSveElemsizeArrangement(id->idInsOpt()), false); // nnnnn
break;
+ // <Xd|SP>, <Xn|SP>, #<imm>
+ case IF_SVE_BB_2A: // ...........nnnnn .....iiiiiiddddd -- SVE stack frame adjustment
+ {
+ const regNumber reg1 = (id->idReg1() == REG_ZR) ? REG_SP : id->idReg1();
+ const regNumber reg2 = (id->idReg2() == REG_ZR) ? REG_SP : id->idReg2();
+ emitDispReg(reg1, id->idOpSize(), true); // ddddd
+ emitDispReg(reg2, id->idOpSize(), true); // nnnnn
+ emitDispImm(emitGetInsSC(id), false); // iiiiii
+ break;
+ }
+
+ // <Xd>, #<imm>
+ case IF_SVE_BC_1A: // ................ .....iiiiiiddddd -- SVE stack frame size
+ emitDispReg(id->idReg1(), id->idOpSize(), true); // ddddd
+ emitDispImm(emitGetInsSC(id), false); // iiiiii
+ break;
+
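These display cases cover the vector-length arithmetic instructions; with the ZR-to-SP substitution above, typical output looks like (operands illustrative):

    addvl sp, sp, #-2   // IF_SVE_BB_2A
    rdvl  x0, #3        // IF_SVE_BC_1A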
// <Zd>.<T>, <Zn>.<Tb>, #<const>
case IF_SVE_FR_2A: // .........x.xxiii ......nnnnnddddd -- SVE2 bitwise shift left long
{
@@ -27171,6 +28922,8 @@ void emitter::emitDispInsHelp(
// <Zda>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
case IF_SVE_EH_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE integer dot product (unpredicated)
+ // <Zda>.S, <Zn>.B, <Zm>.B
+ case IF_SVE_FO_3A: // ...........mmmmm ......nnnnnddddd -- SVE integer matrix multiply accumulate
{
const insOpts smallSizeSpecifier = (insOpts)(id->idInsOpt() - 2);
emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd
@@ -27183,6 +28936,13 @@ void emitter::emitDispInsHelp(
case IF_SVE_EL_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer multiply-add long
case IF_SVE_EN_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 saturating multiply-add interleaved long
case IF_SVE_EO_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 saturating multiply-add long
+ case IF_SVE_FX_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer absolute difference and accumulate long
+ // <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ case IF_SVE_FL_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer add/subtract long
+ case IF_SVE_FN_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer multiply long
+ case IF_SVE_FS_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer add/subtract interleaved long
+ // <Zd>.Q, <Zn>.D, <Zm>.D
+ case IF_SVE_FN_3B: // ...........mmmmm ......nnnnnddddd -- SVE2 integer multiply long
{
const insOpts smallSizeSpecifier = (insOpts)(id->idInsOpt() - 1);
emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd
@@ -27191,6 +28951,26 @@ void emitter::emitDispInsHelp(
break;
}
+ // <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ case IF_SVE_GC_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer add/subtract narrow high part
+ {
+ const insOpts largeSizeSpecifier = (insOpts)(id->idInsOpt() + 1);
+ emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd
+ emitDispSveReg(id->idReg2(), largeSizeSpecifier, true); // nnnnn
+ emitDispSveReg(id->idReg3(), largeSizeSpecifier, false); // mmmmm
+ break;
+ }
+
+ // <Zd>.<T>, <Zn>.<T>, <Zm>.<Tb>
+ case IF_SVE_FM_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer add/subtract wide
+ {
+ const insOpts smallSizeSpecifier = (insOpts)(id->idInsOpt() - 1);
+ emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd
+ emitDispSveReg(id->idReg2(), id->idInsOpt(), true); // nnnnn
+ emitDispSveReg(id->idReg3(), smallSizeSpecifier, false); // mmmmm
+ break;
+ }
+
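The widening and narrowing cases above derive the second arrangement by adding or subtracting from idInsOpt(), which assumes the scalable insOpts values are declared consecutively in ascending element-size order (B, H, S, D). A compact sketch of that invariant, with a stand-in enum:

    #include <cassert>

    // Assumes consecutive enum values in ascending element-size order,
    // which is what makes "opt - 1" mean "one element size narrower".
    enum insOpts
    {
        OPT_B,
        OPT_H,
        OPT_S,
        OPT_D
    };

    insOpts narrower(insOpts opt) { assert(opt > OPT_B); return (insOpts)(opt - 1); }
    insOpts wider(insOpts opt)    { assert(opt < OPT_D); return (insOpts)(opt + 1); }

    int main()
    {
        assert(narrower(OPT_S) == OPT_H); // e.g. SADDLB Zd.S, Zn.H, Zm.H
        assert(wider(OPT_H) == OPT_S);    // e.g. ADDHNB Zd.H, Zn.S, Zm.S
    }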
// CDOT <Zda>.<T>, <Zn>.<Tb>, <Zm>.<Tb>, <const>
case IF_SVE_EJ_3A: // ........xx.mmmmm ....rrnnnnnddddd -- SVE2 complex integer dot product
{
@@ -27729,6 +29509,28 @@ void emitter::emitDispInsHelp(
emitDispSveReg(id->idReg4(), id->idInsOpt(), false);
break;
+ // <Zd>.B, { <Zn>.B }, <Zm>[<index>]
+ case IF_SVE_GG_3A: // ........ii.mmmmm ......nnnnnddddd -- SVE2 lookup table with 2-bit indices and 16-bit
+ // element size
+ // <Zd>.B, { <Zn>.B }, <Zm>[<index>]
+ case IF_SVE_GH_3A: // ........i..mmmmm ......nnnnnddddd -- SVE2 lookup table with 4-bit indices and 16-bit
+ // element size
+ // <Zd>.H, { <Zn>.H }, <Zm>[<index>]
+ case IF_SVE_GG_3B: // ........ii.mmmmm ...i..nnnnnddddd -- SVE2 lookup table with 2-bit indices and 16-bit
+ // element size
+ // <Zd>.H, { <Zn1>.H, <Zn2>.H }, <Zm>[<index>]
+ case IF_SVE_GH_3B: // ........ii.mmmmm ......nnnnnddddd -- SVE2 lookup table with 4-bit indices and 16-bit
+ // element size
+ // <Zd>.H, { <Zn>.H }, <Zm>[<index>]
+ case IF_SVE_GH_3B_B: // ........ii.mmmmm ......nnnnnddddd -- SVE2 lookup table with 4-bit indices and 16-bit
+ // element size
+ imm = emitGetInsSC(id);
+ emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd
+ emitDispSveConsecutiveRegList(id->idReg2(), 1, id->idInsOpt(), true); // nnnnn
+ emitDispSveReg(id->idReg3(), false); // mmmmm
+ emitDispElementIndex(imm, false); // ii/iii
+ break;
+
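For these lookup-table formats the destination and the one-register table list carry an arrangement, while the indexed register does not, e.g. luti2 z0.b, { z1.b }, z2[3]; that suffix-less operand is presumably what the new emitDispSveReg(regNumber, bool) overload declared in emitarm64.h serves. An illustrative printer for the operand shape (hypothetical register numbers):

    #include <cstdio>

    // Operand shape: arranged destination, one-register arranged list,
    // and an indexed table register with no arrangement suffix.
    int main()
    {
        int zd = 0, zn = 1, zm = 2, idx = 3;
        printf("luti2 z%d.b, { z%d.b }, z%d[%d]\n", zd, zn, zm, idx);
    }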
// <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]
// <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod> #1]
// <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod> #2]
@@ -27801,6 +29603,65 @@ void emitter::emitDispInsHelp(
emitDispSveImmIndex(id->idReg3(), id->idInsOpt(), imm);
break;
+ // <Zd>, <Zn>
+ case IF_SVE_BI_2A: // ................ ......nnnnnddddd -- SVE constructive prefix (unpredicated)
+ emitDispSveReg(id->idReg1(), id->idInsOpt(), true);
+ emitDispSveReg(id->idReg2(), id->idInsOpt(), false);
+ break;
+
+ // <Zd>.<T>, <R><n|SP>
+ case IF_SVE_CB_2A: // ........xx...... ......nnnnnddddd -- SVE broadcast general register
+ emitDispSveReg(id->idReg1(), id->idInsOpt(), true);
+ emitDispReg(encodingZRtoSP(id->idReg2()), size, false);
+ break;
+
+ // <Zd>.H, <Zn>.B
+ case IF_SVE_HH_2A: // ................ ......nnnnnddddd -- SVE2 FP8 upconverts
+ // <Zd>.<T>, <Zn>.<Tb>
+ case IF_SVE_CH_2A: // ........xx...... ......nnnnnddddd -- SVE unpack vector elements
+ emitDispSveReg(id->idReg1(), id->idInsOpt(), true);
+ emitDispSveReg(id->idReg2(), (insOpts)((unsigned)id->idInsOpt() - 1), false);
+ break;
+
+ // <Zd>.<T>, <Zn>.<T>
+ case IF_SVE_BJ_2A: // ........xx...... ......nnnnnddddd -- SVE floating-point exponential accelerator
+ // <Zd>.<T>, <Zn>.<T>
+ case IF_SVE_CG_2A: // ........xx...... ......nnnnnddddd -- SVE reverse vector elements
+ // <Zd>.<T>, <Zn>.<T>
+ case IF_SVE_HF_2A: // ........xx...... ......nnnnnddddd -- SVE floating-point reciprocal estimate (unpredicated)
+ emitDispSveReg(id->idReg1(), id->idInsOpt(), true);
+ emitDispSveReg(id->idReg2(), id->idInsOpt(), false);
+ break;
+
+ // <Zd>.<T>, <Zn>.<T>, #<const>
+ case IF_SVE_BF_2A: // ........xx.xxiii ......nnnnnddddd -- SVE bitwise shift by immediate (unpredicated)
+ // <Zd>.<T>, <Zn>.<T>, #<const>
+ case IF_SVE_FT_2A: // ........xx.xxiii ......nnnnnddddd -- SVE2 bitwise shift and insert
+ // <Zda>.<T>, <Zn>.<T>, #<const>
+ case IF_SVE_FU_2A: // ........xx.xxiii ......nnnnnddddd -- SVE2 bitwise shift right and accumulate
+ imm = emitGetInsSC(id);
+ emitDispSveReg(id->idReg1(), id->idInsOpt(), true);
+ emitDispSveReg(id->idReg2(), id->idInsOpt(), true);
+ emitDispImm(imm, false);
+ break;
+
+ // <Zd>.<T>, <Zn>.<T>[<imm>]
+ case IF_SVE_BX_2A: // ...........ixxxx ......nnnnnddddd -- sve_int_perm_dupq_i
+ imm = emitGetInsSC(id);
+ emitDispSveReg(id->idReg1(), id->idInsOpt(), true);
+ emitDispSveReg(id->idReg2(), id->idInsOpt(), false);
+ emitDispElementIndex(imm, false);
+ break;
+
+ // <Zdn>.B, <Zdn>.B, <Zm>.B, #<imm>
+ case IF_SVE_BY_2A: // ............iiii ......mmmmmddddd -- sve_int_perm_extq
+ imm = emitGetInsSC(id);
+ emitDispSveReg(id->idReg1(), id->idInsOpt(), true);
+ emitDispSveReg(id->idReg1(), id->idInsOpt(), true);
+ emitDispSveReg(id->idReg2(), id->idInsOpt(), true);
+ emitDispImm(imm, false);
+ break;
+
default:
printf("unexpected format %s", emitIfName(id->idInsFmt()));
assert(!"unexpectedFormat");
@@ -30124,6 +31985,7 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins
case IF_SVE_GU_3A: // ...........iimmm ......nnnnnddddd -- SVE floating-point multiply-add (indexed)
case IF_SVE_GU_3B: // ...........immmm ......nnnnnddddd -- SVE floating-point multiply-add (indexed)
+ case IF_SVE_GN_3A: // ...........mmmmm ......nnnnnddddd -- SVE2 FP8 multiply-add long
result.insThroughput = PERFSCORE_THROUGHPUT_2C;
result.insLatency = PERFSCORE_LATENCY_4C;
break;
@@ -30152,13 +32014,87 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins
}
break;
- case IF_SVE_GU_3C: // .........i.iimmm ......nnnnnddddd -- SVE floating-point multiply-add (indexed)
- case IF_SVE_GX_3C: // .........i.iimmm ......nnnnnddddd -- SVE floating-point multiply (indexed)
- case IF_SVE_EW_3A: // ...........mmmmm ......nnnnnddddd -- SVE2 multiply-add (checked pointer)
- case IF_SVE_EW_3B: // ...........mmmmm ......aaaaaddddd -- SVE2 multiply-add (checked pointer)
- case IF_SVE_CA_3A: // ........xx.mmmmm ......nnnnnddddd -- sve_int_perm_tbxquads
- case IF_SVE_EV_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE integer clamp
- case IF_SVE_EX_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE permute vector elements (quadwords)
+ case IF_SVE_HA_3A: // ...........mmmmm ......nnnnnddddd -- SVE BFloat16 floating-point dot product
+ switch (ins)
+ {
+ case INS_sve_fdot:
+ result.insThroughput = PERFSCORE_THROUGHPUT_1C; // need to fix
+ result.insLatency = PERFSCORE_LATENCY_1C; // need to fix
+ break;
+ case INS_sve_bfdot:
+ result.insThroughput = PERFSCORE_THROUGHPUT_2C;
+ result.insLatency = PERFSCORE_LATENCY_4C;
+ break;
+ default:
+ // all other instructions
+ perfScoreUnhandledInstruction(id, &result);
+ break;
+ }
+ break;
+
+ case IF_SVE_HB_3A: // ...........mmmmm ......nnnnnddddd -- SVE floating-point multiply-add long
+ switch (ins)
+ {
+ case INS_sve_fmlalb:
+ case INS_sve_fmlalt:
+ case INS_sve_fmlslb:
+ case INS_sve_fmlslt:
+ case INS_sve_bfmlalb:
+ case INS_sve_bfmlalt:
+ result.insThroughput = PERFSCORE_THROUGHPUT_2C;
+ result.insLatency = PERFSCORE_LATENCY_4C;
+ break;
+ case INS_sve_bfmlslb:
+ case INS_sve_bfmlslt:
+ result.insThroughput = PERFSCORE_THROUGHPUT_1C; // need to fix
+ result.insLatency = PERFSCORE_LATENCY_1C; // need to fix
+ break;
+ default:
+ // all other instructions
+ perfScoreUnhandledInstruction(id, &result);
+ break;
+ }
+ break;
+
+ case IF_SVE_AV_3A: // ...........mmmmm ......kkkkkddddd -- SVE2 bitwise ternary operations
+ switch (ins)
+ {
+ case INS_sve_eor3:
+ case INS_sve_bcax:
+ result.insThroughput = PERFSCORE_THROUGHPUT_1C;
+ result.insLatency = PERFSCORE_LATENCY_2C;
+ break;
+ case INS_sve_bsl:
+ case INS_sve_bsl1n:
+ case INS_sve_bsl2n:
+ case INS_sve_nbsl:
+ result.insThroughput = PERFSCORE_THROUGHPUT_2C;
+ result.insLatency = PERFSCORE_LATENCY_2C;
+ break;
+ default:
+ // all other instructions
+ perfScoreUnhandledInstruction(id, &result);
+ break;
+ }
+ break;
+
+ case IF_SVE_GU_3C: // .........i.iimmm ......nnnnnddddd -- SVE floating-point multiply-add (indexed)
+ case IF_SVE_GX_3C: // .........i.iimmm ......nnnnnddddd -- SVE floating-point multiply (indexed)
+ case IF_SVE_EW_3A: // ...........mmmmm ......nnnnnddddd -- SVE2 multiply-add (checked pointer)
+ case IF_SVE_EW_3B: // ...........mmmmm ......aaaaaddddd -- SVE2 multiply-add (checked pointer)
+ case IF_SVE_CA_3A: // ........xx.mmmmm ......nnnnnddddd -- sve_int_perm_tbxquads
+ case IF_SVE_EV_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE integer clamp
+ case IF_SVE_EX_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE permute vector elements (quadwords)
+ case IF_SVE_GW_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE FP clamp
+ case IF_SVE_AT_3B: // ...........mmmmm ......nnnnnddddd -- SVE integer add/subtract vectors (unpredicated)
+ case IF_SVE_AB_3B: // ................ ...gggmmmmmddddd -- SVE integer add/subtract vectors (predicated)
+ case IF_SVE_HL_3B: // ................ ...gggmmmmmddddd -- SVE floating-point arithmetic (predicated)
+ case IF_SVE_GO_3A: // ...........mmmmm ......nnnnnddddd -- SVE2 FP8 multiply-add long long
+ case IF_SVE_GW_3B: // ...........mmmmm ......nnnnnddddd -- SVE FP clamp
+ case IF_SVE_HA_3A_E: // ...........mmmmm ......nnnnnddddd -- SVE BFloat16 floating-point dot product
+ case IF_SVE_HA_3A_F: // ...........mmmmm ......nnnnnddddd -- SVE BFloat16 floating-point dot product
+ case IF_SVE_HD_3A_A: // ...........mmmmm ......nnnnnddddd -- SVE floating point matrix multiply accumulate
+ case IF_SVE_HK_3B: // ...........mmmmm ......nnnnnddddd -- SVE floating-point arithmetic (unpredicated)
result.insThroughput = PERFSCORE_THROUGHPUT_1C; // need to fix
result.insLatency = PERFSCORE_LATENCY_1C; // need to fix
break;
@@ -30168,31 +32104,92 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins
case IF_SVE_BR_3B: // ...........mmmmm ......nnnnnddddd -- SVE permute vector segments
case IF_SVE_BZ_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE table lookup (three sources)
case IF_SVE_BZ_3A_A: // ........xx.mmmmm ......nnnnnddddd -- SVE table lookup (three sources)
+ case IF_SVE_FL_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer add/subtract long
+ case IF_SVE_FM_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer add/subtract wide
+ case IF_SVE_FP_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 bitwise exclusive-or interleaved
+ case IF_SVE_FS_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer add/subtract interleaved long
+ case IF_SVE_GC_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer add/subtract narrow high part
+ case IF_SVE_GF_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 histogram generation (segment)
+ case IF_SVE_AU_3A: // ...........mmmmm ......nnnnnddddd -- SVE bitwise logical operations (unpredicated)
+ case IF_SVE_GI_4A: // ........xx.mmmmm ...gggnnnnnddddd -- SVE2 histogram generation (vector)
+ case IF_SVE_BB_2A: // ...........nnnnn .....iiiiiiddddd -- SVE stack frame adjustment
+ case IF_SVE_BC_1A: // ................ .....iiiiiiddddd -- SVE stack frame size
result.insThroughput = PERFSCORE_THROUGHPUT_2C;
result.insLatency = PERFSCORE_LATENCY_2C;
break;
+ case IF_SVE_FQ_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 bitwise permute
+ result.insThroughput = PERFSCORE_THROUGHPUT_2X;
+ result.insLatency = PERFSCORE_LATENCY_6C;
+ break;
+
+ case IF_SVE_FN_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer multiply long
+ switch (ins)
+ {
+ case INS_sve_smullb:
+ case INS_sve_smullt:
+ case INS_sve_umullb:
+ case INS_sve_umullt:
+ case INS_sve_sqdmullb:
+ case INS_sve_sqdmullt:
+ result.insThroughput = PERFSCORE_THROUGHPUT_1C;
+ result.insLatency = PERFSCORE_LATENCY_4C;
+ break;
+ case INS_sve_pmullb:
+ case INS_sve_pmullt:
+ result.insThroughput = PERFSCORE_THROUGHPUT_1C;
+ result.insLatency = PERFSCORE_LATENCY_2C;
+ break;
+ default:
+ // all other instructions
+ perfScoreUnhandledInstruction(id, &result);
+ break;
+ }
+ break;
+
case IF_SVE_BA_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE index generation (register start, register
// increment)
result.insThroughput = PERFSCORE_THROUGHPUT_2X;
result.insLatency = PERFSCORE_LATENCY_8C;
break;
+ case IF_SVE_BH_3A: // .........x.mmmmm ....hhnnnnnddddd -- SVE address generation
+ case IF_SVE_BH_3B: // ...........mmmmm ....hhnnnnnddddd -- SVE address generation
+ case IF_SVE_BH_3B_A: // ...........mmmmm ....hhnnnnnddddd -- SVE address generation
+ result.insThroughput = PERFSCORE_THROUGHPUT_2C;
+ result.insLatency = PERFSCORE_LATENCY_2C;
+ break;
+
case IF_SVE_BL_1A: // ............iiii ......pppppddddd -- SVE element count
result.insThroughput = PERFSCORE_THROUGHPUT_2C;
result.insLatency = PERFSCORE_LATENCY_2C;
break;
case IF_SVE_BK_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE floating-point trig select coefficient
+ case IF_SVE_FO_3A: // ...........mmmmm ......nnnnnddddd -- SVE integer matrix multiply accumulate
result.insThroughput = PERFSCORE_THROUGHPUT_2C;
result.insLatency = PERFSCORE_LATENCY_3C;
break;
case IF_SVE_BG_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE bitwise shift by wide elements (unpredicated)
+ case IF_SVE_FN_3B: // ...........mmmmm ......nnnnnddddd -- SVE2 integer multiply long
+ case IF_SVE_BD_3B: // ...........mmmmm ......nnnnnddddd -- SVE2 integer multiply vectors (unpredicated)
result.insThroughput = PERFSCORE_THROUGHPUT_1C;
result.insLatency = PERFSCORE_LATENCY_2C;
break;
+ case IF_SVE_CE_2A: // ................ ......nnnnn.DDDD -- SVE move predicate from vector
+ case IF_SVE_CE_2B: // .........i...ii. ......nnnnn.DDDD -- SVE move predicate from vector
+ case IF_SVE_CE_2C: // ..............i. ......nnnnn.DDDD -- SVE move predicate from vector
+ case IF_SVE_CE_2D: // .............ii. ......nnnnn.DDDD -- SVE move predicate from vector
+ case IF_SVE_CF_2A: // ................ .......NNNNddddd -- SVE move predicate into vector
+ case IF_SVE_CF_2B: // .........i...ii. .......NNNNddddd -- SVE move predicate into vector
+ case IF_SVE_CF_2C: // ..............i. .......NNNNddddd -- SVE move predicate into vector
+ case IF_SVE_CF_2D: // .............ii. .......NNNNddddd -- SVE move predicate into vector
+ result.insThroughput = PERFSCORE_THROUGHPUT_140C; // @ToDo currently undocumented
+ result.insLatency = PERFSCORE_LATENCY_140C;
+ break;
+
case IF_SVE_CI_3A: // ........xx..MMMM .......NNNN.DDDD -- SVE permute predicate elements
case IF_SVE_CJ_2A: // ........xx...... .......NNNN.DDDD -- SVE reverse predicate elements
case IF_SVE_CK_2A: // ................ .......NNNN.DDDD -- SVE unpack predicate elements
@@ -30238,6 +32235,11 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins
result.insThroughput = PERFSCORE_THROUGHPUT_1C;
break;
+ case IF_SVE_CW_4A: // ........xx.mmmmm ..VVVVnnnnnddddd -- SVE select vector elements (predicated)
+ result.insLatency = PERFSCORE_LATENCY_2C;
+ result.insThroughput = PERFSCORE_THROUGHPUT_2C;
+ break;
+
case IF_SVE_CX_4A: // ........xx.mmmmm ...gggnnnnn.DDDD -- SVE integer compare vectors
case IF_SVE_CX_4A_A: // ........xx.mmmmm ...gggnnnnn.DDDD -- SVE integer compare vectors
case IF_SVE_CY_3A: // ........xx.iiiii ...gggnnnnn.DDDD -- SVE integer compare with signed immediate
@@ -30257,10 +32259,30 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins
case IF_SVE_EL_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer multiply-add long
case IF_SVE_EN_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 saturating multiply-add interleaved long
case IF_SVE_EO_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 saturating multiply-add long
+ case IF_SVE_FW_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer absolute difference and accumulate
+ case IF_SVE_FX_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE2 integer absolute difference and accumulate long
result.insLatency = PERFSCORE_LATENCY_4C;
result.insThroughput = PERFSCORE_THROUGHPUT_1C;
break;
+ case IF_SVE_GJ_3A: // ...........mmmmm ......nnnnnddddd -- SVE2 crypto constructive binary operations
+ switch (ins)
+ {
+ case INS_sve_rax1:
+ result.insThroughput = PERFSCORE_THROUGHPUT_1C;
+ result.insLatency = PERFSCORE_LATENCY_2C;
+ break;
+ case INS_sve_sm4ekey:
+ result.insThroughput = PERFSCORE_THROUGHPUT_1C;
+ result.insLatency = PERFSCORE_LATENCY_4C;
+ break;
+ default:
+ // all other instructions
+ perfScoreUnhandledInstruction(id, &result);
+ break;
+ }
+ break;
+
case IF_SVE_GZ_3A: // ...........iimmm ....i.nnnnnddddd -- SVE floating-point multiply-add long (indexed)
switch (ins)
{
@@ -30509,12 +32531,9 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins
// Arithmetic, shift complex
case IF_SVE_EU_3A: // ........xx...... ...gggmmmmmddddd -- SVE2 saturating/rounding bitwise shift left
// (predicated)
- result.insLatency = PERFSCORE_LATENCY_4C;
- result.insThroughput = PERFSCORE_THROUGHPUT_1C;
- break;
-
// Arithmetic, pairwise add and accum long
case IF_SVE_EQ_3A: // ........xx...... ...gggnnnnnddddd -- SVE2 integer pairwise add and accumulate long
+ case IF_SVE_EF_3A: // ...........mmmmm ......nnnnnddddd -- SVE two-way dot product
result.insLatency = PERFSCORE_LATENCY_4C;
result.insThroughput = PERFSCORE_THROUGHPUT_1C;
break;
@@ -30543,6 +32562,34 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins
result.insThroughput = PERFSCORE_THROUGHPUT_2X;
break;
+ case IF_SVE_HK_3A: // ........xx.mmmmm ......nnnnnddddd -- SVE floating-point arithmetic (unpredicated)
+ switch (ins)
+ {
+ case INS_sve_frecps:
+ case INS_sve_frsqrts:
+ result.insThroughput = PERFSCORE_THROUGHPUT_2C;
+ result.insLatency = PERFSCORE_LATENCY_4C;
+ break;
+
+ case INS_sve_fmul:
+ case INS_sve_ftsmul:
+ result.insThroughput = PERFSCORE_THROUGHPUT_2C;
+ result.insLatency = PERFSCORE_LATENCY_3C;
+ break;
+
+ case INS_sve_fadd:
+ case INS_sve_fsub:
+ result.insThroughput = PERFSCORE_THROUGHPUT_2C;
+ result.insLatency = PERFSCORE_LATENCY_2C;
+ break;
+
+ default:
+ // all other instructions
+ perfScoreUnhandledInstruction(id, &result);
+ break;
+ }
+ break;
+
case IF_SVE_HL_3A: // ........xx...... ...gggmmmmmddddd -- SVE floating-point arithmetic (predicated)
switch (ins)
{
@@ -31416,12 +33463,14 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins
break;
case IF_SVE_GP_3A: // ........xx.....r ...gggmmmmmddddd -- SVE floating-point complex add (predicated)
+ case IF_SVE_EI_3A: // ...........mmmmm ......nnnnnddddd -- SVE mixed sign dot product
result.insThroughput = PERFSCORE_THROUGHPUT_2C;
result.insLatency = PERFSCORE_LATENCY_3C;
break;
case IF_SVE_GV_3A: // ...........immmm ....rrnnnnnddddd -- SVE floating-point complex multiply-add (indexed)
case IF_SVE_GT_4A: // ........xx.mmmmm .rrgggnnnnnddddd -- SVE floating-point complex multiply-add (predicated)
+ case IF_SVE_HD_3A: // ...........mmmmm ......nnnnnddddd -- SVE floating point matrix multiply accumulate
result.insThroughput = PERFSCORE_THROUGHPUT_2C;
result.insLatency = PERFSCORE_LATENCY_5C;
break;
@@ -31479,6 +33528,7 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins
case IF_SVE_HV_4A: // ........xx.aaaaa ...gggmmmmmddddd -- SVE floating-point multiply-accumulate writing
// multiplicand
+ case IF_SVE_HU_4A: // ........xx.mmmmm ...gggnnnnnddddd -- SVE floating-point multiply-accumulate writing addend
result.insThroughput = PERFSCORE_THROUGHPUT_2C;
result.insLatency = PERFSCORE_LATENCY_4C;
break;
@@ -31495,6 +33545,36 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins
result.insLatency = PERFSCORE_LATENCY_2C;
break;
+ case IF_SVE_GG_3A: // ........ii.mmmmm ......nnnnnddddd -- SVE2 lookup table with 2-bit indices and 16-bit
+ // element size
+ case IF_SVE_GG_3B: // ........ii.mmmmm ...i..nnnnnddddd -- SVE2 lookup table with 2-bit indices and 16-bit
+ // element size
+ case IF_SVE_GH_3A: // ........i..mmmmm ......nnnnnddddd -- SVE2 lookup table with 4-bit indices and 16-bit
+ // element size
+ case IF_SVE_GH_3B: // ........ii.mmmmm ......nnnnnddddd -- SVE2 lookup table with 4-bit indices and 16-bit
+ // element size
+ case IF_SVE_GH_3B_B: // ........ii.mmmmm ......nnnnnddddd -- SVE2 lookup table with 4-bit indices and 16-bit
+ // element size
+ result.insThroughput = PERFSCORE_THROUGHPUT_1C; // need to fix
+ result.insLatency = PERFSCORE_LATENCY_1C; // need to fix
+ break;
+
case IF_SVE_HY_3A: // .........h.mmmmm ...gggnnnnn.oooo -- SVE 32-bit gather prefetch (scalar plus 32-bit scaled
// offsets)
switch (ins)
@@ -31675,6 +33755,121 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins
result.insLatency = PERFSCORE_LATENCY_6C;
break;
+ case IF_SVE_BI_2A: // ................ ......nnnnnddddd -- SVE constructive prefix (unpredicated)
+ result.insThroughput = PERFSCORE_THROUGHPUT_2C;
+ result.insLatency = PERFSCORE_LATENCY_2C;
+ break;
+
+ case IF_SVE_HH_2A: // ................ ......nnnnnddddd -- SVE2 FP8 upconverts
+ switch (ins)
+ {
+ case INS_sve_f1cvt:
+ case INS_sve_f2cvt:
+ case INS_sve_bf1cvt:
+ case INS_sve_bf2cvt:
+ case INS_sve_f1cvtlt:
+ case INS_sve_f2cvtlt:
+ case INS_sve_bf1cvtlt:
+ case INS_sve_bf2cvtlt:
+ result.insThroughput = PERFSCORE_THROUGHPUT_1C; // need to fix
+ result.insLatency = PERFSCORE_LATENCY_1C; // need to fix
+ break;
+ default:
+ // all other instructions
+ perfScoreUnhandledInstruction(id, &result);
+ break;
+ }
+ break;
+
+ case IF_SVE_BJ_2A: // ........xx...... ......nnnnnddddd -- SVE floating-point exponential accelerator
+ result.insThroughput = PERFSCORE_THROUGHPUT_1C;
+ result.insLatency = PERFSCORE_LATENCY_3C;
+ break;
+
+ case IF_SVE_CB_2A: // ........xx...... ......nnnnnddddd -- SVE broadcast general register
+ switch (ins)
+ {
+ case INS_sve_mov:
+ result.insThroughput = PERFSCORE_THROUGHPUT_2C;
+ result.insLatency = PERFSCORE_LATENCY_2C;
+ break;
+ case INS_sve_dup:
+ result.insThroughput = PERFSCORE_THROUGHPUT_1C;
+ result.insLatency = PERFSCORE_LATENCY_3C;
+ break;
+ default:
+ // all other instructions
+ perfScoreUnhandledInstruction(id, &result);
+ break;
+ }
+ break;
+
+ case IF_SVE_CG_2A: // ........xx...... ......nnnnnddddd -- SVE reverse vector elements
+ switch (ins)
+ {
+ case INS_sve_rev:
+ result.insThroughput = PERFSCORE_THROUGHPUT_2C;
+ result.insLatency = PERFSCORE_LATENCY_2C;
+ break;
+ default:
+ // all other instructions
+ perfScoreUnhandledInstruction(id, &result);
+ break;
+ }
+ break;
+
+ case IF_SVE_CH_2A: // ........xx...... ......nnnnnddddd -- SVE unpack vector elements
+ result.insThroughput = PERFSCORE_THROUGHPUT_2C;
+ result.insLatency = PERFSCORE_LATENCY_2C;
+ break;
+
+ case IF_SVE_HF_2A: // ........xx...... ......nnnnnddddd -- SVE floating-point reciprocal estimate (unpredicated)
+ result.insThroughput = PERFSCORE_THROUGHPUT_1C;
+ result.insLatency = PERFSCORE_LATENCY_3C;
+ break;
+
+ case IF_SVE_BF_2A: // ........xx.xxiii ......nnnnnddddd -- SVE bitwise shift by immediate (unpredicated)
+ case IF_SVE_FT_2A: // ........xx.xxiii ......nnnnnddddd -- SVE2 bitwise shift and insert
+ result.insThroughput = PERFSCORE_THROUGHPUT_1C;
+ result.insLatency = PERFSCORE_LATENCY_2C;
+ break;
+
+ case IF_SVE_FU_2A: // ........xx.xxiii ......nnnnnddddd -- SVE2 bitwise shift right and accumulate
+ result.insThroughput = PERFSCORE_THROUGHPUT_1C;
+ result.insLatency = PERFSCORE_LATENCY_4C;
+ break;
+
+ case IF_SVE_BX_2A: // ...........ixxxx ......nnnnnddddd -- sve_int_perm_dupq_i
+ result.insThroughput = PERFSCORE_THROUGHPUT_1C; // need to fix
+ result.insLatency = PERFSCORE_LATENCY_1C; // need to fix
+ break;
+
+ case IF_SVE_BY_2A: // ............iiii ......mmmmmddddd -- sve_int_perm_extq
+ result.insThroughput = PERFSCORE_THROUGHPUT_1C; // need to fix
+ result.insLatency = PERFSCORE_LATENCY_1C; // need to fix
+ break;
+
default:
// all other instructions
perfScoreUnhandledInstruction(id, &result);
diff --git a/src/coreclr/jit/emitarm64.h b/src/coreclr/jit/emitarm64.h
index 3fa81419e960..e466311a1156 100644
--- a/src/coreclr/jit/emitarm64.h
+++ b/src/coreclr/jit/emitarm64.h
@@ -51,14 +51,16 @@ void emitDispBarrier(insBarrier barrier);
void emitDispShiftOpts(insOpts opt);
void emitDispExtendOpts(insOpts opt);
void emitDispSveExtendOpts(insOpts opt);
-void emitDispSveExtendOptsModN(insOpts opt, int n);
+void emitDispSveExtendOptsModN(insOpts opt, ssize_t imm);
void emitDispSveModAddr(instruction ins, regNumber reg1, regNumber reg2, insOpts opt, insFormat fmt);
void emitDispSveImm(regNumber reg1, ssize_t imm, insOpts opt);
void emitDispSveImmMulVl(regNumber reg1, ssize_t imm);
void emitDispSveImmIndex(regNumber reg1, insOpts opt, ssize_t imm);
void emitDispLSExtendOpts(insOpts opt);
void emitDispReg(regNumber reg, emitAttr attr, bool addComma);
+void emitDispSveReg(regNumber reg, bool addComma);
void emitDispSveReg(regNumber reg, insOpts opt, bool addComma);
+void emitDispSveRegIndex(regNumber reg, ssize_t index, bool addComma);
void emitDispVectorReg(regNumber reg, insOpts opt, bool addComma);
void emitDispVectorRegIndex(regNumber reg, emitAttr elemsize, ssize_t index, bool addComma);
void emitDispVectorRegList(regNumber firstReg, unsigned listSize, insOpts opt, bool addComma);
@@ -544,6 +546,12 @@ static code_t insEncodeSveElemsize_30_or_21(insFormat fmt, emitAttr size);
// Returns the encoding for the field 'i1:tszh:tszl' at bit locations '23-22:20-18'.
static code_t insEncodeSveElemsize_tszh_tszl_and_imm(const insOpts opt, const ssize_t imm);
+// Returns the encoding for the field 'tszh:tszl:imm3' at bit locations '23-22:20-19:18-16'.
+static code_t insEncodeSveElemsizeWithShift_tszh_tszl_imm3(const insOpts opt, ssize_t imm, bool isRightShift);
+
+// Returns the encoding for the field 'i1:tsz' at bit locations '20:19-16'.
+static code_t insEncodeSveElemsizeWithImmediate_i1_tsz(const insOpts opt, ssize_t imm);
+
// Returns the encoding to select the constant values 90 or 270 for an Arm64 SVE vector instruction
// This specifically encode the field 'rot' at bit location '16'.
static code_t insEncodeSveImm90_or_270_rot(ssize_t imm);
@@ -591,6 +599,49 @@ static code_t insEncodeSveElemsize_dtype_ld1w(instruction ins, insFormat fmt, em
// for the 'dtypeh' and 'dtypel' fields.
static code_t insEncodeSveElemsize_dtypeh_dtypel(instruction ins, insFormat fmt, emitAttr size, code_t code);
+// Encodes an immediate value in consecutive bits from most significant position 'hi' to least significant
+// position 'lo'.
+template <const size_t hi, const size_t lo>
+static code_t insEncodeUimm(size_t imm)
+{
+ // lo <= hi < 32
+ static_assert((hi >= lo) && (hi < sizeof(code_t) * BITS_PER_BYTE));
+
+ const size_t imm_bits = hi - lo + 1;
+ static_assert(imm_bits < sizeof(code_t) * BITS_PER_BYTE);
+
+ const size_t imm_max = 1 << imm_bits;
+ assert(imm < imm_max);
+
+ code_t result = static_cast<code_t>(imm << lo);
+ assert((result >> lo) == imm);
+ return result;
+}
+
+// Encodes an immediate value across two ranges of consecutive bits, splitting the bits of the immediate
+// value between them. The bit ranges are hi1-lo1 and hi2-lo2, where the second range sits at a less
+// significant position than the first.
+template <const size_t hi1, const size_t lo1, const size_t hi2, const size_t lo2>
+static code_t insEncodeSplitUimm(size_t imm)
+{
+ static_assert((hi1 >= lo1) && (lo1 > hi2) && (hi2 >= lo2));
+ static_assert(hi1 < sizeof(code_t) * BITS_PER_BYTE);
+
+ const size_t hi_bits = hi1 - lo1 + 1;
+ const size_t lo_bits = hi2 - lo2 + 1;
+
+ const size_t imm_max = 1 << (hi_bits + lo_bits);
+ assert(imm < imm_max);
+
+ const size_t hi_max = 1 << hi_bits;
+ const size_t lo_max = 1 << lo_bits;
+
+ size_t immhi = (imm >> lo_bits) & (hi_max - 1);
+ size_t immlo = imm & (lo_max - 1);
+
+ return insEncodeUimm<hi1, lo1>(immhi) | insEncodeUimm<hi2, lo2>(immlo);
+}
+
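The two templates above pack an unsigned immediate either into one contiguous bit field or split across two fields. A self-contained sketch of the same arithmetic, assuming code_t is a 32-bit instruction word and using the uimm3h3l 23-22/12 split (declared further down) as the worked case:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    using code_t = uint32_t; // assumption: 32-bit instruction words

    template <size_t hi, size_t lo>
    code_t encodeUimm(size_t imm)
    {
        static_assert(hi >= lo && hi < 32, "field must fit the instruction word");
        assert(imm < (size_t(1) << (hi - lo + 1))); // immediate fits the field
        return static_cast<code_t>(imm << lo);
    }

    int main()
    {
        // 4-bit immediate 0b1011 placed at bits 19-16.
        assert(encodeUimm<19, 16>(0xB) == 0x000B0000u);

        // Split 3-bit immediate 0b101: bits 23-22 take the high two bits,
        // bit 12 takes the low bit (the uimm3h3l 23-22/12 layout).
        size_t imm = 0b101;
        code_t hi2 = encodeUimm<23, 22>(imm >> 1);
        code_t lo1 = encodeUimm<12, 12>(imm & 1);
        assert((hi2 | lo1) == ((0b10u << 22) | (1u << 12)));
    }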
// Returns the encoding for the immediate value as 4-bits at bit locations '19-16'.
static code_t insEncodeSimm4_19_to_16(ssize_t imm);
@@ -621,6 +672,9 @@ static code_t insEncodeUimm5_MultipleOf4_20_to_16(ssize_t imm);
// Returns the encoding for the immediate value that is a multiple of 8 as 5-bits at bit locations '20-16'.
static code_t insEncodeUimm5_MultipleOf8_20_to_16(ssize_t imm);
+// Returns the encoding for the immediate value as 6-bits at bit locations '10-5'.
+static code_t insEncodeSimm6_10_to_5(ssize_t imm);
+
// Returns the encoding for the immediate value as 6-bits at bit locations '21-16'.
static code_t insEncodeSimm6_21_to_16(ssize_t imm);
@@ -645,6 +699,15 @@ static code_t insEncodeUimm2_11_to_10(ssize_t imm);
// Returns the encoding for the immediate value as 2-bits at bit locations '20-19'.
static code_t insEncodeUimm2_20_to_19(ssize_t imm);
+// Returns the encoding for the immediate value as 2-bits at bit locations '23-22'.
+static code_t insEncodeUimm2_23_to_22(ssize_t imm);
+
+// Returns the encoding for the immediate value as 1 bit at bit location '23'.
+static code_t insEncodeUimm1_23(ssize_t imm);
+
+// Returns the encoding for the immediate value as 3-bits at bit locations '23-22' for high and '12' for low.
+static code_t insEncodeUimm3h3l_23_to_22_and_12(ssize_t imm);
+
// Returns the encoding for the immediate value as 1 bit at bit location '10'.
static code_t insEncodeImm1_10(ssize_t imm);
@@ -657,6 +720,9 @@ static code_t insEncodeImm1_22(ssize_t imm);
// Returns the encoding for the immediate value as 7-bits at bit locations '20-14'.
static code_t insEncodeUimm7_20_to_14(ssize_t imm);
+// Returns the encoding for the immediate value as 4-bits at bit locations '19-16'.
+static code_t insEncodeUimm4_19_to_16(ssize_t imm);
+
// Returns the encoding for the immediate value as 4-bits starting from 1, at bit locations '19-16'.
static code_t insEncodeUimm4From1_19_to_16(ssize_t imm);
@@ -776,6 +842,13 @@ static bool isValidUimm6_MultipleOf8(ssize_t value)
return (0 <= value) && (value <= 504) && (value % 8 == 0);
};
+template <const size_t bits>
+static bool isValidUimm(ssize_t value)
+{
+ constexpr size_t max = 1 << bits;
+ return (0 <= value) && (value < max);
+}
+
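isValidUimm<bits> is a pure range check, 0 <= value < 2^bits. A minimal standalone equivalent:

    #include <cassert>

    // 0 <= value < 2^bits, matching the template above.
    template <unsigned bits>
    bool isValidUimm(long long value)
    {
        return (0 <= value) && (value < (1LL << bits));
    }

    int main()
    {
        assert(isValidUimm<4>(0));
        assert(isValidUimm<4>(15));
        assert(!isValidUimm<4>(16));
        assert(!isValidUimm<4>(-1));
    }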
// Returns true if 'value' is a legal immediate 1 bit encoding (such as for PEXT).
static bool isValidImm1(ssize_t value)
{
@@ -1438,23 +1511,24 @@ void emitIns_R_R_R(instruction ins,
insOpts opt = INS_OPTS_NONE,
insScalableOpts sopt = INS_SCALABLE_OPTS_NONE);
-void emitIns_R_R_R_I(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- ssize_t imm,
- insOpts opt = INS_OPTS_NONE,
- emitAttr attrReg2 = EA_UNKNOWN);
-
-void emitInsSve_R_R_R_I(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- ssize_t imm,
- insOpts opt = INS_OPTS_NONE,
- emitAttr attrReg2 = EA_UNKNOWN);
+void emitIns_R_R_R_I(instruction ins,
+ emitAttr attr,
+ regNumber reg1,
+ regNumber reg2,
+ regNumber reg3,
+ ssize_t imm,
+ insOpts opt = INS_OPTS_NONE,
+ emitAttr attrReg2 = EA_UNKNOWN,
+ insScalableOpts sopt = INS_SCALABLE_OPTS_NONE);
+
+void emitInsSve_R_R_R_I(instruction ins,
+ emitAttr attr,
+ regNumber reg1,
+ regNumber reg2,
+ regNumber reg3,
+ ssize_t imm,
+ insOpts opt = INS_OPTS_NONE,
+ insScalableOpts sopt = INS_SCALABLE_OPTS_NONE);
void emitIns_R_R_R_I_I(instruction ins,
emitAttr attr,
diff --git a/src/coreclr/jit/emitloongarch64.cpp b/src/coreclr/jit/emitloongarch64.cpp
index 0ea2546213a7..86ba90fa36ed 100644
--- a/src/coreclr/jit/emitloongarch64.cpp
+++ b/src/coreclr/jit/emitloongarch64.cpp
@@ -4598,7 +4598,7 @@ void emitter::emitInsLoadStoreOp(instruction ins, emitAttr attr, regNumber dataR
int offset = 0;
DWORD lsl = 0;
- if (addr->OperGet() == GT_LEA)
+ if (addr->OperIs(GT_LEA))
{
offset = addr->AsAddrMode()->Offset();
if (addr->AsAddrMode()->gtScale > 0)
@@ -4980,7 +4980,7 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
{
emitIns_R_R_R(ins, attr, dst->GetRegNum(), src1->GetRegNum(), src2->GetRegNum());
}
- else if (dst->OperGet() == GT_MUL)
+ else if (dst->OperIs(GT_MUL))
{
if (!needCheckOv)
{
@@ -5048,10 +5048,14 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
// TODO-LOONGARCH64-CQ: here sign-extend dst when deal with 32bit data is too conservative.
if (EA_SIZE(attr) == EA_4BYTE)
+ {
emitIns_R_R_I(INS_slli_w, attr, dst->GetRegNum(), dst->GetRegNum(), 0);
+ }
}
else
{
+ assert(dst->OperIs(GT_ADD, GT_SUB));
+
regNumber regOp1 = src1->GetRegNum();
regNumber regOp2 = src2->GetRegNum();
regNumber saveOperReg1 = REG_NA;
@@ -5064,26 +5068,38 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
assert(REG_R21 != dst->GetRegNum());
assert(REG_RA != dst->GetRegNum());
- if (dst->GetRegNum() == regOp1)
+ if (dst->OperIs(GT_ADD))
{
- assert(REG_R21 != regOp1);
- assert(REG_RA != regOp1);
- saveOperReg1 = REG_R21;
- saveOperReg2 = regOp2;
- emitIns_R_R_R(INS_or, attr, REG_R21, regOp1, REG_R0);
+ saveOperReg1 = (dst->GetRegNum() == regOp1) ? regOp2 : regOp1;
}
- else if (dst->GetRegNum() == regOp2)
+ else
{
- assert(REG_R21 != regOp2);
- assert(REG_RA != regOp2);
- saveOperReg1 = regOp1;
- saveOperReg2 = REG_R21;
- emitIns_R_R_R(INS_or, attr, REG_R21, regOp2, REG_R0);
+ if (dst->GetRegNum() == regOp1)
+ {
+ assert(REG_R21 != regOp1);
+ assert(REG_RA != regOp1);
+ saveOperReg1 = REG_R21;
+ emitIns_R_R_R(INS_or, attr, REG_R21, regOp1, REG_R0);
+ }
+ else
+ {
+ saveOperReg1 = regOp1;
+ }
}
- else
+
+ if ((dst->gtFlags & GTF_UNSIGNED) == 0)
{
- saveOperReg1 = regOp1;
- saveOperReg2 = regOp2;
+ saveOperReg2 = dst->GetSingleTempReg();
+ assert((saveOperReg2 != REG_RA) && (saveOperReg2 != REG_R21));
+ assert(REG_RA != regOp1);
+ assert(saveOperReg2 != regOp2);
+
+ ssize_t ui6 = (attr == EA_4BYTE) ? 31 : 63;
+ if (dst->OperIs(GT_ADD))
+ {
+ emitIns_R_R_I(INS_srli_d, attr, REG_RA, regOp1, ui6);
+ }
+ emitIns_R_R_I(INS_srli_d, attr, saveOperReg2, regOp2, ui6);
}
}
@@ -5091,86 +5107,56 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
if (needCheckOv)
{
- if (dst->OperGet() == GT_ADD || dst->OperGet() == GT_SUB)
+ // ADD : A = B + C
+ // SUB : A = B - C <=> B = A + C
+ if ((dst->gtFlags & GTF_UNSIGNED) != 0)
{
- ssize_t imm;
- regNumber tempReg1;
- regNumber tempReg2;
- // ADD : A = B + C
- // SUB : C = A - B
- if ((dst->gtFlags & GTF_UNSIGNED) != 0)
+ // ADD: if A < B, goto overflow
+ // SUB: if B < A, goto overflow
+ codeGen->genJumpToThrowHlpBlk_la(SCK_OVERFLOW, INS_bltu,
+ dst->OperIs(GT_ADD) ? dst->GetRegNum() : saveOperReg1, nullptr,
+ dst->OperIs(GT_ADD) ? saveOperReg1 : dst->GetRegNum());
+ }
+ else
+ {
+ if (dst->OperIs(GT_SUB))
{
- // if A < B, goto overflow
- if (dst->OperGet() == GT_ADD)
- {
- tempReg1 = dst->GetRegNum();
- tempReg2 = saveOperReg1;
- }
- else
- {
- tempReg1 = saveOperReg1;
- tempReg2 = saveOperReg2;
- }
- codeGen->genJumpToThrowHlpBlk_la(SCK_OVERFLOW, INS_bltu, tempReg1, nullptr, tempReg2);
+ emitIns_R_R_I(INS_srli_d, attr, REG_RA, dst->GetRegNum(), (attr == EA_4BYTE) ? 31 : 63);
}
- else
- {
- tempReg1 = REG_RA;
- tempReg2 = dst->GetSingleTempReg();
- assert(tempReg1 != tempReg2);
- assert(tempReg1 != saveOperReg1);
- assert(tempReg2 != saveOperReg2);
-
- ssize_t ui6 = (attr == EA_4BYTE) ? 31 : 63;
- if (dst->OperGet() == GT_ADD)
- {
- emitIns_R_R_I(INS_srli_d, attr, tempReg1, saveOperReg1, ui6);
- }
- else
- {
- emitIns_R_R_I(INS_srli_d, attr, tempReg1, dst->GetRegNum(), ui6);
- }
- emitIns_R_R_I(INS_srli_d, attr, tempReg2, saveOperReg2, ui6);
- emitIns_R_R_R(INS_xor, attr, tempReg1, tempReg1, tempReg2);
- if (attr == EA_4BYTE)
- {
- imm = 1;
- emitIns_R_R_I(INS_andi, attr, tempReg1, tempReg1, imm);
- emitIns_R_R_I(INS_andi, attr, tempReg2, tempReg2, imm);
- }
- // if (B > 0 && C < 0) || (B < 0 && C > 0), skip overflow
- BasicBlock* tmpLabel = codeGen->genCreateTempLabel();
- BasicBlock* tmpLabel2 = codeGen->genCreateTempLabel();
- BasicBlock* tmpLabel3 = codeGen->genCreateTempLabel();
+ emitIns_R_R_R(INS_xor, attr, REG_RA, REG_RA, saveOperReg2);
+ if (attr == EA_4BYTE)
+ {
+ emitIns_R_R_I(INS_andi, attr, REG_RA, REG_RA, 1);
+ emitIns_R_R_I(INS_andi, attr, saveOperReg2, saveOperReg2, 1);
+ }
+ // ADD: if (B > 0 && C < 0) || (B < 0 && C > 0), skip overflow
+ // SUB: if (A > 0 && C < 0) || (A < 0 && C > 0), skip overflow
+ BasicBlock* tmpLabel1 = codeGen->genCreateTempLabel();
+ BasicBlock* tmpLabel2 = codeGen->genCreateTempLabel();
+ BasicBlock* tmpLabel3 = codeGen->genCreateTempLabel();
- emitIns_J_cond_la(INS_bne, tmpLabel, tempReg1, REG_R0);
+ emitIns_J_cond_la(INS_bne, tmpLabel1, REG_RA, REG_R0);
- emitIns_J_cond_la(INS_bne, tmpLabel3, tempReg2, REG_R0);
+ emitIns_J_cond_la(INS_bne, tmpLabel3, saveOperReg2, REG_R0);
- // B > 0 and C > 0, if A < B, goto overflow
- emitIns_J_cond_la(INS_bge, tmpLabel, dst->OperGet() == GT_ADD ? dst->GetRegNum() : saveOperReg1,
- dst->OperGet() == GT_ADD ? saveOperReg1 : saveOperReg2);
+ // ADD: B > 0 and C > 0, if A < B, goto overflow
+ // SUB: A > 0 and C > 0, if B < A, goto overflow
+ emitIns_J_cond_la(INS_bge, tmpLabel1, dst->OperIs(GT_ADD) ? dst->GetRegNum() : saveOperReg1,
+ dst->OperIs(GT_ADD) ? saveOperReg1 : dst->GetRegNum());
- codeGen->genDefineTempLabel(tmpLabel2);
+ codeGen->genDefineTempLabel(tmpLabel2);
- codeGen->genJumpToThrowHlpBlk(EJ_jmp, SCK_OVERFLOW);
+ codeGen->genJumpToThrowHlpBlk(EJ_jmp, SCK_OVERFLOW);
- codeGen->genDefineTempLabel(tmpLabel3);
+ codeGen->genDefineTempLabel(tmpLabel3);
- // B < 0 and C < 0, if A > B, goto overflow
- emitIns_J_cond_la(INS_blt, tmpLabel2, dst->OperGet() == GT_ADD ? saveOperReg1 : saveOperReg2,
- dst->OperGet() == GT_ADD ? dst->GetRegNum() : saveOperReg1);
+ // ADD: B < 0 and C < 0, if A > B, goto overflow
+ // SUB: A < 0 and C < 0, if B > A, goto overflow
+ emitIns_J_cond_la(INS_blt, tmpLabel2, dst->OperIs(GT_ADD) ? saveOperReg1 : dst->GetRegNum(),
+ dst->OperIs(GT_ADD) ? dst->GetRegNum() : saveOperReg1);
- codeGen->genDefineTempLabel(tmpLabel);
- }
- }
- else
- {
-#ifdef DEBUG
- printf("---------[LOONGARCH64]-NOTE: UnsignedOverflow instruction %d\n", ins);
-#endif
- assert(!"unimplemented on LOONGARCH yet");
+ codeGen->genDefineTempLabel(tmpLabel1);
}
}
}
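The signed overflow check emitted above follows the classic sign-bit argument: mixed-sign operands can never overflow an add, and for same-sign operands overflow shows up as the result falling below (both positive) or above (both negative) the first operand; SUB reuses the same test via B = A + C. A worked C++ equivalent of the ADD case, independent of the emitter:

    #include <cassert>
    #include <cstdint>

    // Mirrors the emitted sequence: shift out the sign bits, XOR to compare
    // signs, and only compare against the first operand when signs agree.
    bool addOverflows(int64_t b, int64_t c)
    {
        int64_t  a     = (int64_t)((uint64_t)b + (uint64_t)c); // wrapping add
        uint64_t signB = (uint64_t)b >> 63;
        uint64_t signC = (uint64_t)c >> 63;
        if ((signB ^ signC) != 0)
            return false; // mixed signs never overflow an add
        if (signC == 0)
            return a < b; // B >= 0 and C >= 0: overflow iff A < B
        return a > b;     // B < 0 and C < 0: overflow iff A > B
    }

    int main()
    {
        assert(!addOverflows(1, 2));
        assert(addOverflows(INT64_MAX, 1));
        assert(addOverflows(INT64_MIN, -1));
        assert(!addOverflows(-5, 7));
    }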
diff --git a/src/coreclr/jit/emitriscv64.cpp b/src/coreclr/jit/emitriscv64.cpp
index 66cb371aeeec..9e9f90041709 100644
--- a/src/coreclr/jit/emitriscv64.cpp
+++ b/src/coreclr/jit/emitriscv64.cpp
@@ -625,7 +625,8 @@ void emitter::emitIns_R_R(
assert(isGeneralRegisterOrR0(reg2));
code |= (reg1 & 0x1f) << 7;
code |= reg2 << 15;
- code |= 0x7 << 12;
+ if (INS_fcvt_d_w != ins && INS_fcvt_d_wu != ins) // fcvt.d.w[u] always produces an exact result
+ code |= 0x7 << 12; // round according to frm status register
}
else if (INS_fcvt_s_d == ins || INS_fcvt_d_s == ins)
{
@@ -633,7 +634,8 @@ void emitter::emitIns_R_R(
assert(isFloatReg(reg2));
code |= (reg1 & 0x1f) << 7;
code |= (reg2 & 0x1f) << 15;
- code |= 0x7 << 12;
+ if (INS_fcvt_d_s != ins) // fcvt.d.s never rounds
+ code |= 0x7 << 12; // round according to frm status register
}
else
{
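The 0x7 << 12 being skipped here is the rm field (bits 14-12) of the RISC-V F/D conversion encodings; 0b111 requests dynamic rounding from the frm CSR, while always-exact conversions such as fcvt.d.w and fcvt.d.s can leave rm as zero. A small sketch of the field placement, separate from the emitter's real encoder:

    #include <cassert>
    #include <cstdint>

    // RISC-V FP conversions keep the rounding mode in bits 14-12 (rm).
    constexpr uint32_t RM_RNE = 0b000; // round to nearest even
    constexpr uint32_t RM_DYN = 0b111; // dynamic: read the frm CSR

    constexpr uint32_t withRm(uint32_t code, uint32_t rm)
    {
        return code | (rm << 12);
    }

    int main()
    {
        uint32_t base = 0; // opcode/funct bits elided for brevity
        assert(withRm(base, RM_DYN) == 0x7000u); // the 0x7 << 12 above
        assert(withRm(base, RM_RNE) == base);    // exact conversions can use rm = 0
    }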
diff --git a/src/coreclr/jit/emitxarch.cpp b/src/coreclr/jit/emitxarch.cpp
index d429f0d261f7..3580b02db4df 100644
--- a/src/coreclr/jit/emitxarch.cpp
+++ b/src/coreclr/jit/emitxarch.cpp
@@ -15330,13 +15330,10 @@ BYTE* emitter::emitOutputRI(BYTE* dst, instrDesc* id)
if (id->idIsCnsReloc())
{
- if (emitComp->IsTargetAbi(CORINFO_NATIVEAOT_ABI))
+ if (emitComp->IsTargetAbi(CORINFO_NATIVEAOT_ABI) && id->idAddr()->iiaSecRel)
{
- if (id->idAddr()->iiaSecRel)
- {
- // For section relative, the immediate offset is relocatable and hence need IMAGE_REL_SECREL
- emitRecordRelocation((void*)(dst - (unsigned)EA_SIZE(size)), (void*)(size_t)val, IMAGE_REL_SECREL);
- }
+ // For section relative, the immediate offset is relocatable and hence needs IMAGE_REL_SECREL
+ emitRecordRelocation((void*)(dst - (unsigned)EA_SIZE(size)), (void*)(size_t)val, IMAGE_REL_SECREL);
}
else
{
diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp
index ee0f99b32ce8..6fd9a752d493 100644
--- a/src/coreclr/jit/fgbasic.cpp
+++ b/src/coreclr/jit/fgbasic.cpp
@@ -206,17 +206,10 @@ bool Compiler::fgEnsureFirstBBisScratch()
assert(fgFirstBBScratch == nullptr);
- BasicBlock* block = BasicBlock::New(this, BBJ_ALWAYS, fgFirstBB);
- block->SetFlags(BBF_NONE_QUIRK);
+ BasicBlock* block;
if (fgFirstBB != nullptr)
{
- // If we have profile data the new block will inherit fgFirstBlock's weight
- if (fgFirstBB->hasProfileWeight())
- {
- block->inheritWeight(fgFirstBB);
- }
-
// The first block has an implicit ref count which we must
// remove. Note the ref count could be greater than one, if
// the first block is not scratch and is targeted by a
@@ -224,14 +217,24 @@ bool Compiler::fgEnsureFirstBBisScratch()
assert(fgFirstBB->bbRefs >= 1);
fgFirstBB->bbRefs--;
+ block = BasicBlock::New(this);
+
+ // If we have profile data the new block will inherit fgFirstBlock's weight
+ if (fgFirstBB->hasProfileWeight())
+ {
+ block->inheritWeight(fgFirstBB);
+ }
+
// The new scratch bb will fall through to the old first bb
FlowEdge* const edge = fgAddRefPred(fgFirstBB, block);
edge->setLikelihood(1.0);
+ block->SetKindAndTargetEdge(BBJ_ALWAYS, edge);
fgInsertBBbefore(fgFirstBB, block);
}
else
{
noway_assert(fgLastBB == nullptr);
+ block = BasicBlock::New(this, BBJ_ALWAYS);
fgFirstBB = block;
fgLastBB = block;
}
@@ -239,7 +242,7 @@ bool Compiler::fgEnsureFirstBBisScratch()
noway_assert(fgLastBB != nullptr);
// Set the expected flags
- block->SetFlags(BBF_INTERNAL | BBF_IMPORTED);
+ block->SetFlags(BBF_INTERNAL | BBF_IMPORTED | BBF_NONE_QUIRK);
// This new first BB has an implicit ref, and no others.
//
@@ -357,7 +360,7 @@ void Compiler::fgConvertBBToThrowBB(BasicBlock* block)
fgRemoveBlockAsPred(block);
// Update jump kind after the scrub.
- block->SetKindAndTarget(BBJ_THROW);
+ block->SetKindAndTargetEdge(BBJ_THROW);
block->RemoveFlags(BBF_RETLESS_CALL); // no longer a BBJ_CALLFINALLY
// Any block with a throw is rare
@@ -645,9 +648,9 @@ void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* oldTarget, Bas
case BBJ_LEAVE: // This function can be called before import, so we still have BBJ_LEAVE
{
assert(block->TargetIs(oldTarget));
- block->SetTarget(newTarget);
- FlowEdge* const oldEdge = fgRemoveRefPred(oldTarget, block);
- fgAddRefPred(newTarget, block, oldEdge);
+ fgRemoveRefPred(block->GetTargetEdge());
+ FlowEdge* const newEdge = fgAddRefPred(newTarget, block, block->GetTargetEdge());
+ block->SetTargetEdge(newEdge);
break;
}
@@ -655,44 +658,50 @@ void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* oldTarget, Bas
if (block->TrueTargetIs(oldTarget))
{
- if (block->FalseTargetIs(oldTarget))
+ FlowEdge* const oldEdge = block->GetTrueEdge();
+
+ if (block->FalseEdgeIs(oldEdge))
{
// fgRemoveRefPred returns nullptr for BBJ_COND blocks with two flow edges to target
fgRemoveConditionalJump(block);
assert(block->KindIs(BBJ_ALWAYS));
assert(block->TargetIs(oldTarget));
- block->SetTarget(newTarget);
- }
- else
- {
- block->SetTrueTarget(newTarget);
}
// fgRemoveRefPred should have removed the flow edge
- FlowEdge* oldEdge = fgRemoveRefPred(oldTarget, block);
- assert(oldEdge != nullptr);
+ fgRemoveRefPred(oldEdge);
+ assert(oldEdge->getDupCount() == 0);
// TODO-NoFallThrough: Proliferate weight from oldEdge
// (as a quirk, we avoid doing so for the true target to reduce diffs for now)
FlowEdge* const newEdge = fgAddRefPred(newTarget, block);
+
if (block->KindIs(BBJ_ALWAYS))
{
newEdge->setLikelihood(1.0);
+ block->SetTargetEdge(newEdge);
}
- else if (oldEdge->hasLikelihood())
+ else
{
- newEdge->setLikelihood(oldEdge->getLikelihood());
+ assert(block->KindIs(BBJ_COND));
+ block->SetTrueEdge(newEdge);
+
+ if (oldEdge->hasLikelihood())
+ {
+ newEdge->setLikelihood(oldEdge->getLikelihood());
+ }
}
}
else
{
assert(block->FalseTargetIs(oldTarget));
+ FlowEdge* const oldEdge = block->GetFalseEdge();
// fgRemoveRefPred should have removed the flow edge
- FlowEdge* oldEdge = fgRemoveRefPred(oldTarget, block);
- assert(oldEdge != nullptr);
- block->SetFalseTarget(newTarget);
- fgAddRefPred(newTarget, block, oldEdge);
+ fgRemoveRefPred(oldEdge);
+ assert(oldEdge->getDupCount() == 0);
+ FlowEdge* const newEdge = fgAddRefPred(newTarget, block, oldEdge);
+ block->SetFalseEdge(newEdge);
}
break;
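The fgReplaceJumpTarget rewrite above is edge-first: remove the old pred edge, add the replacement edge, then point the block's kind-specific edge slot (target/true/false) at it, so bbRefs, dup counts, and likelihoods stay consistent. A reduced model of that protocol with toy Block/Edge types (not the JIT's):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct Block;
    struct Edge
    {
        Block* from;
        Block* to;
        double likelihood;
    };

    struct Block
    {
        std::vector<Edge*> preds; // incoming flow edges
        Edge* target = nullptr;   // single out-edge, BBJ_ALWAYS-like
    };

    Edge* addRefPred(Block* to, Block* from, double likelihood)
    {
        Edge* e = new Edge{from, to, likelihood};
        to->preds.push_back(e);
        return e;
    }

    void removeRefPred(Edge* e)
    {
        auto& p = e->to->preds;
        p.erase(std::find(p.begin(), p.end(), e));
    }

    // Retarget: drop the old edge first, then install the replacement.
    void replaceJumpTarget(Block* block, Block* newTarget)
    {
        Edge* oldEdge = block->target;
        removeRefPred(oldEdge);
        block->target = addRefPred(newTarget, block, oldEdge->likelihood);
        delete oldEdge;
    }

    int main()
    {
        Block a, b, c;
        a.target = addRefPred(&b, &a, 1.0);
        replaceJumpTarget(&a, &c);
        assert(b.preds.empty());
        assert(c.preds.size() == 1 && a.target->to == &c);
    }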
@@ -712,10 +721,8 @@ void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* oldTarget, Bas
}
}
- if (changed)
- {
- InvalidateUniqueSwitchSuccMap();
- }
+ assert(changed);
+ InvalidateUniqueSwitchSuccMap();
break;
}
@@ -731,54 +738,6 @@ void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* oldTarget, Bas
}
//------------------------------------------------------------------------
-// fgReplacePred: update the predecessor list, swapping one pred for another
-//
-// Arguments:
-// block - block with the pred list we want to update
-// oldPred - pred currently appearing in block's pred list
-// newPred - pred that will take oldPred's place.
-//
-// Notes:
-//
-// A block can only appear once in the preds list. If a predecessor has multiple
-// ways to get to this block, then the pred edge DupCount will be >1.
-//
-// This function assumes that all branches from the predecessor (practically, that all
-// switch cases that target this block) are changed to branch from the new predecessor,
-// with the same dup count.
-//
-// Note that the block bbRefs is not changed, since 'block' has the same number of
-// references as before, just from a different predecessor block.
-//
-// Also note this may cause sorting of the pred list.
-//
-void Compiler::fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred)
-{
- noway_assert(block != nullptr);
- noway_assert(oldPred != nullptr);
- noway_assert(newPred != nullptr);
-
- bool modified = false;
-
- for (FlowEdge* const pred : block->PredEdges())
- {
- if (oldPred == pred->getSourceBlock())
- {
- pred->setSourceBlock(newPred);
- modified = true;
- break;
- }
- }
-
- // We may now need to reorder the pred list.
- //
- if (modified)
- {
- block->ensurePredListOrder(this);
- }
-}
-
-//------------------------------------------------------------------------
// fgReplacePred: redirects the given edge to a new predecessor block
//
// Arguments:
@@ -1329,8 +1288,10 @@ void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, Fixed
break;
}
+ case NI_System_SpanHelpers_ClearWithoutReferences:
+ case NI_System_SpanHelpers_Fill:
case NI_System_SpanHelpers_SequenceEqual:
- case NI_System_Buffer_Memmove:
+ case NI_System_SpanHelpers_Memmove:
{
if (FgStack::IsConstArgument(pushedStack.Top(), impInlineInfo))
{
@@ -2963,10 +2924,21 @@ void Compiler::fgLinkBasicBlocks()
{
BasicBlock* const trueTarget = fgLookupBB(curBBdesc->GetTargetOffs());
BasicBlock* const falseTarget = curBBdesc->Next();
- curBBdesc->SetTrueTarget(trueTarget);
- curBBdesc->SetFalseTarget(falseTarget);
- fgAddRefPred<initializingPreds>(trueTarget, curBBdesc);
- fgAddRefPred<initializingPreds>(falseTarget, curBBdesc);
+ FlowEdge* const trueEdge = fgAddRefPred<initializingPreds>(trueTarget, curBBdesc);
+ FlowEdge* const falseEdge = fgAddRefPred<initializingPreds>(falseTarget, curBBdesc);
+ curBBdesc->SetTrueEdge(trueEdge);
+ curBBdesc->SetFalseEdge(falseEdge);
+
+ if (trueEdge == falseEdge)
+ {
+ assert(trueEdge->getDupCount() == 2);
+ trueEdge->setLikelihood(1.0);
+ }
+ else
+ {
+ trueEdge->setLikelihood(0.5);
+ falseEdge->setLikelihood(0.5);
+ }
if (trueTarget->bbNum <= curBBdesc->bbNum)
{
@@ -2989,10 +2961,11 @@ void Compiler::fgLinkBasicBlocks()
assert(!(curBBdesc->IsLast() && jumpsToNext));
BasicBlock* const jumpDest = jumpsToNext ? curBBdesc->Next() : fgLookupBB(curBBdesc->GetTargetOffs());
- // Redundantly use SetKindAndTarget() instead of SetTarget() just this once,
- // so we don't break the HasInitializedTarget() invariant of SetTarget().
- curBBdesc->SetKindAndTarget(curBBdesc->GetKind(), jumpDest);
- fgAddRefPred<initializingPreds>(jumpDest, curBBdesc);
+ // Redundantly use SetKindAndTargetEdge() instead of SetTargetEdge() just this once,
+ // so we don't break the HasInitializedTarget() invariant of SetTargetEdge().
+ FlowEdge* const newEdge = fgAddRefPred<initializingPreds>(jumpDest, curBBdesc);
+ newEdge->setLikelihood(1.0);
+ curBBdesc->SetKindAndTargetEdge(curBBdesc->GetKind(), newEdge);
if (curBBdesc->GetTarget()->bbNum <= curBBdesc->bbNum)
{
@@ -3018,14 +2991,17 @@ void Compiler::fgLinkBasicBlocks()
case BBJ_SWITCH:
{
- unsigned jumpCnt = curBBdesc->GetSwitchTargets()->bbsCount;
- FlowEdge** jumpPtr = curBBdesc->GetSwitchTargets()->bbsDstTab;
+ const unsigned numSucc = curBBdesc->GetSwitchTargets()->bbsCount;
+ unsigned jumpCnt = numSucc;
+ FlowEdge** jumpPtr = curBBdesc->GetSwitchTargets()->bbsDstTab;
do
{
BasicBlock* jumpDest = fgLookupBB((unsigned)*(size_t*)jumpPtr);
FlowEdge* const newEdge = fgAddRefPred<initializingPreds>(jumpDest, curBBdesc);
- *jumpPtr = newEdge;
+
+ newEdge->setLikelihood((1.0 / numSucc) * newEdge->getDupCount());
+ *jumpPtr = newEdge;
if (jumpDest->bbNum <= curBBdesc->bbNum)
{
fgMarkBackwardJump(jumpDest, curBBdesc);
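The new likelihoods are normalized so each block's out-edges sum to 1.0: a two-target BBJ_COND gets 0.5/0.5, the degenerate same-target case gets one dup-count-2 edge at 1.0, and each switch edge gets dupCount/numSucc. For instance, a four-case switch where two cases share a target yields likelihoods 0.5, 0.25, 0.25:

    #include <cassert>

    int main()
    {
        const unsigned numSucc     = 4;         // bbsCount for the switch
        const unsigned dupCounts[] = {2, 1, 1}; // two cases share one target

        double total = 0.0;
        for (unsigned dup : dupCounts)
            total += (1.0 / numSucc) * dup; // same formula as fgLinkBasicBlocks

        assert(total == 1.0); // exact in binary: 0.5 + 0.25 + 0.25
    }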
@@ -3567,7 +3543,7 @@ unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, F
noway_assert(codeAddr == codeEndp);
- /* Finally link up the bbTarget of the blocks together */
+ /* Finally, link up the targets of the blocks */
fgLinkBasicBlocks();
@@ -3885,10 +3861,10 @@ void Compiler::fgFindBasicBlocks()
if (block->KindIs(BBJ_EHFILTERRET))
{
// Mark catch handler as successor.
- block->SetTarget(hndBegBB);
FlowEdge* const newEdge = fgAddRefPred(hndBegBB, block);
newEdge->setLikelihood(1.0);
- assert(block->GetTarget()->bbCatchTyp == BBCT_FILTER_HANDLER);
+ block->SetTargetEdge(newEdge);
+ assert(hndBegBB->bbCatchTyp == BBCT_FILTER_HANDLER);
break;
}
}
@@ -4220,10 +4196,10 @@ void Compiler::fgFixEntryFlowForOSR()
//
fgEnsureFirstBBisScratch();
assert(fgFirstBB->KindIs(BBJ_ALWAYS) && fgFirstBB->JumpsToNext());
- fgRemoveRefPred(fgFirstBB->GetTarget(), fgFirstBB);
- fgFirstBB->SetKindAndTarget(BBJ_ALWAYS, fgOSREntryBB);
- FlowEdge* const edge = fgAddRefPred(fgOSREntryBB, fgFirstBB);
- edge->setLikelihood(1.0);
+ fgRemoveRefPred(fgFirstBB->GetTargetEdge());
+ FlowEdge* const newEdge = fgAddRefPred(fgOSREntryBB, fgFirstBB);
+ newEdge->setLikelihood(1.0);
+ fgFirstBB->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
// We don't know the right weight for this block, since
// execution of the method was interrupted within the
@@ -4772,14 +4748,15 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr)
{
// For each successor of the original block, set the new block as their predecessor.
- for (BasicBlock* const succ : curr->Succs(this))
+ for (FlowEdge* const succEdge : curr->SuccEdges())
{
- if (succ != newBlock)
- {
- JITDUMP(FMT_BB " previous predecessor was " FMT_BB ", now is " FMT_BB "\n", succ->bbNum, curr->bbNum,
- newBlock->bbNum);
- fgReplacePred(succ, curr, newBlock);
- }
+ // For non-switch blocks, successor iterator should not iterate duplicates.
+ assert(succEdge->getSourceBlock() != newBlock);
+
+ BasicBlock* const succBlock = succEdge->getDestinationBlock();
+ JITDUMP(FMT_BB " previous predecessor was " FMT_BB ", now is " FMT_BB "\n", succBlock->bbNum, curr->bbNum,
+ newBlock->bbNum);
+ fgReplacePred(succEdge, newBlock);
}
}
@@ -4811,19 +4788,18 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr)
// Remove flags from the old block that are no longer possible.
curr->RemoveFlags(BBF_HAS_JMP | BBF_RETLESS_CALL);
+ // Default to fallthrough, and add the arc for that.
+ FlowEdge* const newEdge = fgAddRefPred(newBlock, curr);
+ newEdge->setLikelihood(1.0);
+
// Transfer the kind and target. Do this after the code above, to avoid null-ing out the old targets used by the
- // above code (and so newBlock->bbNext is valid, so SetCond() can initialize bbFalseTarget if newBlock is a
- // BBJ_COND).
+ // above code.
newBlock->TransferTarget(curr);
- // Default to fallthrough, and add the arc for that.
- curr->SetKindAndTarget(BBJ_ALWAYS, newBlock);
+ curr->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
curr->SetFlags(BBF_NONE_QUIRK);
assert(curr->JumpsToNext());
- FlowEdge* const newEdge = fgAddRefPred(newBlock, curr);
- newEdge->setLikelihood(1.0);
-
return newBlock;
}
@@ -5046,15 +5022,14 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ)
// an immediately following block of a BBJ_SWITCH (which has
// no fall-through path). For this case, simply insert a new
// fall-through block after 'curr'.
- // TODO-NoFallThrough: Once bbFalseTarget can diverge from bbNext, this will be unnecessary for BBJ_COND
- newBlock = fgNewBBafter(BBJ_ALWAYS, curr, true /* extendRegion */, /* jumpDest */ succ);
+ // TODO-NoFallThrough: Once false target can diverge from bbNext, this will be unnecessary for BBJ_COND
+ newBlock = fgNewBBafter(BBJ_ALWAYS, curr, true /* extendRegion */);
newBlock->SetFlags(BBF_NONE_QUIRK);
- assert(newBlock->JumpsToNext());
}
else
{
// The new block always jumps to 'succ'
- newBlock = fgNewBBinRegion(BBJ_ALWAYS, curr, /* jumpDest */ succ, /* isRunRarely */ curr->isRunRarely());
+ newBlock = fgNewBBinRegion(BBJ_ALWAYS, curr, /* isRunRarely */ curr->isRunRarely());
}
newBlock->CopyFlags(curr, succ->GetFlagsRaw() & BBF_BACKWARD_JUMP);
@@ -5067,6 +5042,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ)
// And 'succ' has 'newBlock' as a new predecessor.
FlowEdge* const newEdge = fgAddRefPred(succ, newBlock);
newEdge->setLikelihood(1.0);
+ newBlock->SetTargetEdge(newEdge);
// This isn't accurate, but it is complex to compute a reasonable number so just assume that we take the
// branch 50% of the time.
@@ -5327,7 +5303,7 @@ BasicBlock* Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
* First, remove 'block' from the predecessor list of succBlock.
*/
- fgRemoveRefPred(succBlock, block);
+ fgRemoveRefPred(block->GetTargetEdge());
for (BasicBlock* const predBlock : block->PredBlocks())
{
@@ -5369,7 +5345,7 @@ BasicBlock* Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
assert(!bPrev->FalseTargetIs(block));
/* Check if both sides of the BBJ_COND now jump to the same block */
- if (bPrev->TrueTargetIs(bPrev->GetFalseTarget()))
+ if (bPrev->TrueEdgeIs(bPrev->GetFalseEdge()))
{
fgRemoveConditionalJump(bPrev);
}
@@ -5445,16 +5421,19 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst)
if (bSrc->KindIs(BBJ_COND) && bSrc->FalseTargetIs(bDst) && !bSrc->NextIs(bDst))
{
// Add a new block after bSrc which jumps to 'bDst'
- jmpBlk = fgNewBBafter(BBJ_ALWAYS, bSrc, true, bDst);
- bSrc->SetFalseTarget(jmpBlk);
- fgAddRefPred(jmpBlk, bSrc, fgGetPredForBlock(bDst, bSrc));
+ jmpBlk = fgNewBBafter(BBJ_ALWAYS, bSrc, true);
+ FlowEdge* oldEdge = bSrc->GetFalseEdge();
+ fgReplacePred(oldEdge, jmpBlk);
+ jmpBlk->SetTargetEdge(oldEdge);
+ assert(jmpBlk->TargetIs(bDst));
+
+ FlowEdge* newEdge = fgAddRefPred(jmpBlk, bSrc, oldEdge);
+ bSrc->SetFalseEdge(newEdge);
// When adding a new jmpBlk we will set the bbWeight and bbFlags
//
if (fgHaveValidEdgeWeights && fgHaveProfileWeights())
{
- FlowEdge* const newEdge = fgGetPredForBlock(jmpBlk, bSrc);
-
jmpBlk->bbWeight = (newEdge->edgeWeightMin() + newEdge->edgeWeightMax()) / 2;
if (bSrc->bbWeight == BB_ZERO_WEIGHT)
{
@@ -5492,8 +5471,6 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst)
}
}
- fgReplacePred(bDst, bSrc, jmpBlk);
-
JITDUMP("Added an unconditional jump to " FMT_BB " after block " FMT_BB "\n", jmpBlk->GetTarget()->bbNum,
bSrc->bbNum);
}
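
A hedged summary of the rewiring above: the existing bSrc -> bDst false edge is reused as jmpBlk's target edge, so its weight data survives, and a fresh edge takes its place as bSrc's false edge:

    FlowEdge* oldEdge = bSrc->GetFalseEdge();                // currently bSrc -> bDst
    fgReplacePred(oldEdge, jmpBlk);                          // now jmpBlk -> bDst
    jmpBlk->SetTargetEdge(oldEdge);
    FlowEdge* newEdge = fgAddRefPred(jmpBlk, bSrc, oldEdge); // new bSrc -> jmpBlk
    bSrc->SetFalseEdge(newEdge);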
@@ -6050,14 +6027,11 @@ DONE:
* Insert a BasicBlock before the given block.
*/
-BasicBlock* Compiler::fgNewBBbefore(BBKinds jumpKind,
- BasicBlock* block,
- bool extendRegion,
- BasicBlock* jumpDest /* = nullptr */)
+BasicBlock* Compiler::fgNewBBbefore(BBKinds jumpKind, BasicBlock* block, bool extendRegion)
{
// Create a new BasicBlock and chain it in
- BasicBlock* newBlk = BasicBlock::New(this, jumpKind, jumpDest);
+ BasicBlock* newBlk = BasicBlock::New(this, jumpKind);
newBlk->SetFlags(BBF_INTERNAL);
fgInsertBBbefore(block, newBlk);
@@ -6092,14 +6066,11 @@ BasicBlock* Compiler::fgNewBBbefore(BBKinds jumpKind,
* Insert a BasicBlock after the given block.
*/
-BasicBlock* Compiler::fgNewBBafter(BBKinds jumpKind,
- BasicBlock* block,
- bool extendRegion,
- BasicBlock* jumpDest /* = nullptr */)
+BasicBlock* Compiler::fgNewBBafter(BBKinds jumpKind, BasicBlock* block, bool extendRegion)
{
// Create a new BasicBlock and chain it in
- BasicBlock* newBlk = BasicBlock::New(this, jumpKind, jumpDest);
+ BasicBlock* newBlk = BasicBlock::New(this, jumpKind);
newBlk->SetFlags(BBF_INTERNAL);
fgInsertBBafter(block, newBlk);
@@ -6139,7 +6110,6 @@ BasicBlock* Compiler::fgNewBBafter(BBKinds jumpKind,
// tree - tree that will be wrapped into a statement and
// inserted in the new block.
// debugInfo - debug info to propagate into the new statement.
-// jumpDest - the jump target of the new block. Defaults to nullptr.
// updateSideEffects - update side effects for the whole statement.
//
// Return Value:
@@ -6148,14 +6118,10 @@ BasicBlock* Compiler::fgNewBBafter(BBKinds jumpKind,
// Notes:
// The new block will have BBF_INTERNAL flag and EH region will be extended
//
-BasicBlock* Compiler::fgNewBBFromTreeAfter(BBKinds jumpKind,
- BasicBlock* block,
- GenTree* tree,
- DebugInfo& debugInfo,
- BasicBlock* jumpDest /* = nullptr */,
- bool updateSideEffects /* = false */)
+BasicBlock* Compiler::fgNewBBFromTreeAfter(
+ BBKinds jumpKind, BasicBlock* block, GenTree* tree, DebugInfo& debugInfo, bool updateSideEffects /* = false */)
{
- BasicBlock* newBlock = fgNewBBafter(jumpKind, block, true, jumpDest);
+ BasicBlock* newBlock = fgNewBBafter(jumpKind, block, true);
newBlock->SetFlags(BBF_INTERNAL);
Statement* stmt = fgNewStmtFromTree(tree, debugInfo);
fgInsertStmtAtEnd(newBlock, stmt);
@@ -6577,7 +6543,6 @@ DONE:
// [0..compHndBBtabCount].
// nearBlk - insert the new block closely after this block, if possible. If nullptr, put the new block anywhere
// in the requested region.
-// jumpDest - the jump target of the new block. Defaults to nullptr.
// putInFilter - put the new block in the filter region given by hndIndex, as described above.
// runRarely - 'true' if the new block is run rarely.
// insertAtEnd - 'true' if the block should be inserted at the end of the region. Note: this is currently only
@@ -6590,7 +6555,6 @@ BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind,
unsigned tryIndex,
unsigned hndIndex,
BasicBlock* nearBlk,
- BasicBlock* jumpDest /* = nullptr */,
bool putInFilter /* = false */,
bool runRarely /* = false */,
bool insertAtEnd /* = false */)
@@ -6716,7 +6680,7 @@ _FoundAfterBlk:;
bbKindNames[jumpKind], tryIndex, hndIndex, dspBool(putInFilter), dspBool(runRarely), dspBool(insertAtEnd),
afterBlk->bbNum);
- return fgNewBBinRegionWorker(jumpKind, afterBlk, regionIndex, putInTryRegion, jumpDest);
+ return fgNewBBinRegionWorker(jumpKind, afterBlk, regionIndex, putInTryRegion);
}
//------------------------------------------------------------------------
@@ -6727,7 +6691,6 @@ _FoundAfterBlk:;
// Arguments:
// jumpKind - the jump kind of the new block to create.
// srcBlk - insert the new block in the same EH region as this block, and closely after it if possible.
-// jumpDest - the jump target of the new block. Defaults to nullptr.
// runRarely - 'true' if the new block is run rarely.
// insertAtEnd - 'true' if the block should be inserted at the end of the region. Note: this is currently only
// implemented when inserting into the main function (not into any EH region).
@@ -6737,7 +6700,6 @@ _FoundAfterBlk:;
BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind,
BasicBlock* srcBlk,
- BasicBlock* jumpDest /* = nullptr */,
bool runRarely /* = false */,
bool insertAtEnd /* = false */)
{
@@ -6756,7 +6718,7 @@ BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind,
putInFilter = ehGetDsc(hndIndex - 1)->InFilterRegionBBRange(srcBlk);
}
- return fgNewBBinRegion(jumpKind, tryIndex, hndIndex, srcBlk, jumpDest, putInFilter, runRarely, insertAtEnd);
+ return fgNewBBinRegion(jumpKind, tryIndex, hndIndex, srcBlk, putInFilter, runRarely, insertAtEnd);
}
//------------------------------------------------------------------------
@@ -6766,14 +6728,13 @@ BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind,
//
// Arguments:
// jumpKind - the jump kind of the new block to create.
-// jumpDest - the jump target of the new block. Defaults to nullptr.
//
// Return Value:
// The new block.
-BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind, BasicBlock* jumpDest /* = nullptr */)
+BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind)
{
- return fgNewBBinRegion(jumpKind, 0, 0, nullptr, jumpDest, /* putInFilter */ false, /* runRarely */ false,
+ return fgNewBBinRegion(jumpKind, 0, 0, nullptr, /* putInFilter */ false, /* runRarely */ false,
/* insertAtEnd */ true);
}
@@ -6792,7 +6753,6 @@ BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind, BasicBlock* jumpDest /*
// set its handler index to the most nested handler region enclosing that 'try' region.
// Otherwise, put the block in the handler region specified by 'regionIndex', and set its 'try'
// index to the most nested 'try' region enclosing that handler region.
-// jumpDest - the jump target of the new block. Defaults to nullptr.
//
// Return Value:
// The new block.
@@ -6800,13 +6760,12 @@ BasicBlock* Compiler::fgNewBBinRegion(BBKinds jumpKind, BasicBlock* jumpDest /*
BasicBlock* Compiler::fgNewBBinRegionWorker(BBKinds jumpKind,
BasicBlock* afterBlk,
unsigned regionIndex,
- bool putInTryRegion,
- BasicBlock* jumpDest /* = nullptr */)
+ bool putInTryRegion)
{
/* Insert the new block */
BasicBlock* afterBlkNext = afterBlk->Next();
(void)afterBlkNext; // prevent "unused variable" error from GCC
- BasicBlock* newBlk = fgNewBBafter(jumpKind, afterBlk, false, jumpDest);
+ BasicBlock* newBlk = fgNewBBafter(jumpKind, afterBlk, false);
if (putInTryRegion)
{
diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp
index 6a2068169e1a..3818cbcd471d 100644
--- a/src/coreclr/jit/fgdiagnostic.cpp
+++ b/src/coreclr/jit/fgdiagnostic.cpp
@@ -134,7 +134,7 @@ void Compiler::fgDebugCheckUpdate()
// Check for unnecessary jumps to the next block.
// A conditional branch should never jump to the next block as it can be folded into a BBJ_ALWAYS.
- if (block->KindIs(BBJ_COND) && block->TrueTargetIs(block->GetFalseTarget()))
+ if (block->KindIs(BBJ_COND) && block->TrueEdgeIs(block->GetFalseEdge()))
{
noway_assert(!"Unnecessary jump to the next block!");
}
@@ -3003,7 +3003,7 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef
{
for (unsigned i = 0; i < sd.numDistinctSuccs; i++)
{
- const BasicBlock* const nonDuplicateSucc = sd.nonDuplicates[i];
+ const BasicBlock* const nonDuplicateSucc = sd.nonDuplicates[i]->getDestinationBlock();
assert(nonDuplicateSucc != nullptr);
assert(nonDuplicateSucc->bbTraversalStamp == curTraversalStamp);
}
@@ -3981,7 +3981,8 @@ void Compiler::fgDebugCheckBlockLinks()
assert(uniqueSuccSet.numDistinctSuccs == count);
for (unsigned i = 0; i < uniqueSuccSet.numDistinctSuccs; i++)
{
- assert(BitVecOps::IsMember(&bitVecTraits, succBlocks, uniqueSuccSet.nonDuplicates[i]->bbNum));
+ assert(BitVecOps::IsMember(&bitVecTraits, succBlocks,
+ uniqueSuccSet.nonDuplicates[i]->getDestinationBlock()->bbNum));
}
}
}
diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp
index 4c781cbc0c22..c69c5d277753 100644
--- a/src/coreclr/jit/fgehopt.cpp
+++ b/src/coreclr/jit/fgehopt.cpp
@@ -167,12 +167,12 @@ PhaseStatus Compiler::fgRemoveEmptyFinally()
fgPrepareCallFinallyRetForRemoval(leaveBlock);
fgRemoveBlock(leaveBlock, /* unreachable */ true);
- currentBlock->SetKindAndTarget(BBJ_ALWAYS, postTryFinallyBlock);
- currentBlock->RemoveFlags(BBF_RETLESS_CALL); // no longer a BBJ_CALLFINALLY
-
// Ref count updates.
- fgAddRefPred(postTryFinallyBlock, currentBlock);
- fgRemoveRefPred(firstBlock, currentBlock);
+ fgRemoveRefPred(currentBlock->GetTargetEdge());
+ FlowEdge* const newEdge = fgAddRefPred(postTryFinallyBlock, currentBlock);
+
+ currentBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
+ currentBlock->RemoveFlags(BBF_RETLESS_CALL); // no longer a BBJ_CALLFINALLY
// Cleanup the postTryFinallyBlock
fgCleanupContinuation(postTryFinallyBlock);
@@ -524,8 +524,8 @@ PhaseStatus Compiler::fgRemoveEmptyTry()
GenTree* finallyRetExpr = finallyRet->GetRootNode();
assert(finallyRetExpr->gtOper == GT_RETFILT);
fgRemoveStmt(block, finallyRet);
- block->SetKindAndTarget(BBJ_ALWAYS, continuation);
- fgAddRefPred(continuation, block);
+ FlowEdge* const newEdge = fgAddRefPred(continuation, block);
+ block->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
}
}
@@ -1093,9 +1093,9 @@ PhaseStatus Compiler::fgCloneFinally()
GenTree* finallyRetExpr = finallyRet->GetRootNode();
assert(finallyRetExpr->gtOper == GT_RETFILT);
fgRemoveStmt(newBlock, finallyRet);
- newBlock->SetKindAndTarget(BBJ_ALWAYS, normalCallFinallyReturn);
- fgAddRefPred(normalCallFinallyReturn, newBlock);
+ FlowEdge* const newEdge = fgAddRefPred(normalCallFinallyReturn, newBlock);
+ newBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
}
else
{
@@ -1135,13 +1135,13 @@ PhaseStatus Compiler::fgCloneFinally()
fgPrepareCallFinallyRetForRemoval(leaveBlock);
fgRemoveBlock(leaveBlock, /* unreachable */ true);
+ // Ref count updates.
+ fgRemoveRefPred(currentBlock->GetTargetEdge());
+ FlowEdge* const newEdge = fgAddRefPred(firstCloneBlock, currentBlock);
+
// This call returns to the expected spot, so retarget it to branch to the clone.
- currentBlock->SetKindAndTarget(BBJ_ALWAYS, firstCloneBlock);
currentBlock->RemoveFlags(BBF_RETLESS_CALL); // no longer a BBJ_CALLFINALLY
-
- // Ref count updates.
- fgAddRefPred(firstCloneBlock, currentBlock);
- fgRemoveRefPred(firstBlock, currentBlock);
+ currentBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
// Make sure iteration isn't going off the deep end.
assert(leaveBlock != endCallFinallyRangeBlock);
@@ -1757,10 +1757,10 @@ bool Compiler::fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block,
JITDUMP("Redirecting branch in " FMT_BB " from " FMT_BB " to " FMT_BB ".\n", block->bbNum, callFinally->bbNum,
canonicalCallFinally->bbNum);
- block->SetTarget(canonicalCallFinally);
- fgAddRefPred(canonicalCallFinally, block);
assert(callFinally->bbRefs > 0);
- fgRemoveRefPred(callFinally, block);
+ fgRemoveRefPred(block->GetTargetEdge());
+ FlowEdge* const newEdge = fgAddRefPred(canonicalCallFinally, block);
+ block->SetTargetEdge(newEdge);
// Update profile counts
//
@@ -2103,19 +2103,20 @@ void Compiler::fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock,
assert(predBlock->KindIs(BBJ_COND));
assert(predBlock->FalseTargetIs(nonCanonicalBlock));
- BasicBlock* const newBlock = fgNewBBafter(BBJ_ALWAYS, predBlock, true, canonicalBlock);
- predBlock->SetFalseTarget(newBlock);
+ BasicBlock* const newBlock = fgNewBBafter(BBJ_ALWAYS, predBlock, true);
JITDUMP("*** " FMT_BB " now falling through to empty " FMT_BB " and then to " FMT_BB "\n", predBlock->bbNum,
newBlock->bbNum, canonicalBlock->bbNum);
// Remove the old flow
- fgRemoveRefPred(nonCanonicalBlock, predBlock);
+ fgRemoveRefPred(predEdge);
// Wire up the new flow
- fgAddRefPred(newBlock, predBlock, predEdge);
+ FlowEdge* const falseEdge = fgAddRefPred(newBlock, predBlock, predEdge);
+ predBlock->SetFalseEdge(falseEdge);
- fgAddRefPred(canonicalBlock, newBlock, predEdge);
+ FlowEdge* const newEdge = fgAddRefPred(canonicalBlock, newBlock, predEdge);
+ newBlock->SetTargetEdge(newEdge);
// If nonCanonicalBlock has only one pred, all its flow transfers.
// If it has multiple preds, then we need edge counts or likelihoods
@@ -2145,21 +2146,25 @@ void Compiler::fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock,
{
JITDUMP("*** " FMT_BB " now branching to " FMT_BB "\n", predBlock->bbNum, canonicalBlock->bbNum);
- // Remove the old flow
- fgRemoveRefPred(nonCanonicalBlock, predBlock);
+ FlowEdge* const newEdge = fgAddRefPred(canonicalBlock, predBlock, predEdge);
- // Wire up the new flow
if (predBlock->KindIs(BBJ_ALWAYS))
{
+ // Remove the old flow
assert(predBlock->TargetIs(nonCanonicalBlock));
- predBlock->SetTarget(canonicalBlock);
+ fgRemoveRefPred(predBlock->GetTargetEdge());
+
+ // Wire up the new flow
+ predBlock->SetTargetEdge(newEdge);
}
else
{
+ // Remove the old flow
assert(predBlock->KindIs(BBJ_COND));
assert(predBlock->TrueTargetIs(nonCanonicalBlock));
- predBlock->SetTrueTarget(canonicalBlock);
- }
+ fgRemoveRefPred(predBlock->GetTrueEdge());
- fgAddRefPred(canonicalBlock, predBlock, predEdge);
+ // Wire up the new flow
+ predBlock->SetTrueEdge(newEdge);
+ }
}
diff --git a/src/coreclr/jit/fgflow.cpp b/src/coreclr/jit/fgflow.cpp
index 479e8b5fb144..79e9d41d7dc9 100644
--- a/src/coreclr/jit/fgflow.cpp
+++ b/src/coreclr/jit/fgflow.cpp
@@ -136,32 +136,6 @@ FlowEdge* Compiler::fgAddRefPred(BasicBlock* block, BasicBlock* blockPred, FlowE
if (flowLast->getSourceBlock() == blockPred)
{
flow = flowLast;
-
- // This edge should have been given a likelihood when it was created.
- // Since we're increasing its duplicate count, update the likelihood.
- //
- assert(flow->hasLikelihood());
- const unsigned numSucc = blockPred->NumSucc();
- assert(numSucc > 0);
-
- if (numSucc == 1)
- {
- // BasicBlock::NumSucc() returns 1 for BBJ_CONDs with the same true/false target.
- // For blocks that only ever have one successor (BBJ_ALWAYS, BBJ_LEAVE, etc.),
- // their successor edge should never have a duplicate count over 1.
- //
- assert(blockPred->KindIs(BBJ_COND));
- assert(blockPred->TrueTargetIs(blockPred->GetFalseTarget()));
- flow->setLikelihood(1.0);
- }
- else
- {
- // Duplicate count isn't updated until later, so add 1 for now.
- //
- const unsigned dupCount = flow->getDupCount() + 1;
- assert(dupCount > 1);
- flow->setLikelihood((1.0 / numSucc) * dupCount);
- }
}
}
}
@@ -211,14 +185,6 @@ FlowEdge* Compiler::fgAddRefPred(BasicBlock* block, BasicBlock* blockPred, FlowE
if (initializingPreds)
{
block->bbLastPred = flow;
-
- // When initializing preds, ensure edge likelihood is set,
- // such that this edge is as likely as any other successor edge
- //
- const unsigned numSucc = blockPred->NumSucc();
- assert(numSucc > 0);
- assert(flow->getDupCount() == 1);
- flow->setLikelihood(1.0 / numSucc);
}
else if ((oldEdge != nullptr) && oldEdge->hasLikelihood())
{
@@ -268,10 +234,6 @@ FlowEdge* Compiler::fgAddRefPred(BasicBlock* block, BasicBlock* blockPred, FlowE
//
assert(block->checkPredListOrder());
- // When initializing preds, edge likelihood should always be set.
- //
- assert(!initializingPreds || flow->hasLikelihood());
-
return flow;
}
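
With this bookkeeping removed from fgAddRefPred, likelihood initialization becomes the caller's responsibility. A minimal sketch of the expected call-site pattern under that assumption:

    FlowEdge* const edge = fgAddRefPred(succBlock, predBlock); // predBlock -> succBlock
    edge->setLikelihood(1.0);                                  // predBlock's only successor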
@@ -284,62 +246,6 @@ template FlowEdge* Compiler::fgAddRefPred<true>(BasicBlock* block,
FlowEdge* oldEdge /* = nullptr */);
//------------------------------------------------------------------------
-// fgRemoveRefPred: Decrements the reference count of a predecessor edge from "blockPred" to "block",
-// removing the edge if it is no longer necessary.
-//
-// Arguments:
-// block -- A block to operate on.
-// blockPred -- The predecessor block to remove from the predecessor list. It must be a predecessor of "block".
-//
-// Return Value:
-// If the flow edge was removed (the predecessor has a "dup count" of 1),
-// returns the flow graph edge that was removed. This means "blockPred" is no longer a predecessor of "block".
-// Otherwise, returns nullptr. This means that "blockPred" is still a predecessor of "block" (because "blockPred"
-// is a switch with multiple cases jumping to "block", or a BBJ_COND with both conditional and fall-through
-// paths leading to "block").
-//
-// Assumptions:
-// -- "blockPred" must be a predecessor block of "block".
-//
-// Notes:
-// -- block->bbRefs is decremented by one to account for the reduction in incoming edges.
-// -- block->bbRefs is adjusted even if preds haven't been computed. If preds haven't been computed,
-// the preds themselves aren't touched.
-// -- fgModified is set if a flow edge is removed (but not if an existing flow edge dup count is decremented),
-// indicating that the flow graph shape has changed.
-//
-FlowEdge* Compiler::fgRemoveRefPred(BasicBlock* block, BasicBlock* blockPred)
-{
- noway_assert(block != nullptr);
- noway_assert(blockPred != nullptr);
- noway_assert(block->countOfInEdges() > 0);
- assert(fgPredsComputed);
- block->bbRefs--;
-
- FlowEdge** ptrToPred;
- FlowEdge* pred = fgGetPredForBlock(block, blockPred, &ptrToPred);
- noway_assert(pred != nullptr);
- noway_assert(pred->getDupCount() > 0);
-
- pred->decrementDupCount();
-
- if (pred->getDupCount() == 0)
- {
- // Splice out the predecessor edge since it's no longer necessary.
- *ptrToPred = pred->getNextPredEdge();
-
- // Any changes to the flow graph invalidate the dominator sets.
- fgModified = true;
-
- return pred;
- }
- else
- {
- return nullptr;
- }
-}
-
-//------------------------------------------------------------------------
// fgRemoveRefPred: Decrements the reference count of `edge`, removing it from its successor block's pred list
// if the reference count is zero.
//
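
For comparison, a sketch of old versus new call sites; the edge-based overload skips the pred-list walk the (block, blockPred) form required:

    // Old form: look up the edge from the (successor, predecessor) pair, then remove it.
    //     fgRemoveRefPred(block->GetTarget(), block);
    // New form: the caller already holds the edge.
    fgRemoveRefPred(block->GetTargetEdge());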
@@ -435,12 +341,12 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
case BBJ_ALWAYS:
case BBJ_EHCATCHRET:
case BBJ_EHFILTERRET:
- fgRemoveRefPred(block->GetTarget(), block);
+ fgRemoveRefPred(block->GetTargetEdge());
break;
case BBJ_COND:
- fgRemoveRefPred(block->GetTrueTarget(), block);
- fgRemoveRefPred(block->GetFalseTarget(), block);
+ fgRemoveRefPred(block->GetTrueEdge());
+ fgRemoveRefPred(block->GetFalseEdge());
break;
case BBJ_EHFINALLYRET:
@@ -502,16 +408,20 @@ Compiler::SwitchUniqueSuccSet Compiler::GetDescriptorForSwitch(BasicBlock* switc
// Now we have a set of unique successors.
unsigned numNonDups = BitVecOps::Count(&blockVecTraits, uniqueSuccBlocks);
- BasicBlock** nonDups = new (getAllocator()) BasicBlock*[numNonDups];
+ FlowEdge** nonDups = new (getAllocator()) FlowEdge*[numNonDups];
unsigned nonDupInd = 0;
+
// At this point, all unique targets are in "uniqueSuccBlocks". As we encounter each,
// add its edge to nonDups and remove it from "uniqueSuccBlocks".
- for (BasicBlock* const targ : switchBlk->SwitchTargets())
+ BBswtDesc* const swtDesc = switchBlk->GetSwitchTargets();
+ for (unsigned i = 0; i < swtDesc->bbsCount; i++)
{
+ FlowEdge* const succEdge = swtDesc->bbsDstTab[i];
+ BasicBlock* const targ = succEdge->getDestinationBlock();
if (BitVecOps::IsMember(&blockVecTraits, uniqueSuccBlocks, targ->bbNum))
{
- nonDups[nonDupInd] = targ;
+ nonDups[nonDupInd] = succEdge;
nonDupInd++;
BitVecOps::RemoveElemD(&blockVecTraits, uniqueSuccBlocks, targ->bbNum);
}
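
Since nonDuplicates now stores edges rather than blocks, consumers take one extra hop to reach the successor block; a sketch mirroring the fgdiagnostic.cpp updates earlier in this patch:

    for (unsigned i = 0; i < sd.numDistinctSuccs; i++)
    {
        BasicBlock* const succ = sd.nonDuplicates[i]->getDestinationBlock();
        // ... examine succ ...
    }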
diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp
index 54003637e7e3..cdabeae13d32 100644
--- a/src/coreclr/jit/fginline.cpp
+++ b/src/coreclr/jit/fginline.cpp
@@ -675,15 +675,18 @@ private:
if (condTree->IsIntegralConst(0))
{
- m_compiler->fgRemoveRefPred(block->GetTrueTarget(), block);
- block->SetKindAndTarget(BBJ_ALWAYS, block->Next());
+ m_compiler->fgRemoveRefPred(block->GetTrueEdge());
+ block->SetKindAndTargetEdge(BBJ_ALWAYS, block->GetFalseEdge());
block->SetFlags(BBF_NONE_QUIRK);
}
else
{
- m_compiler->fgRemoveRefPred(block->GetFalseTarget(), block);
+ m_compiler->fgRemoveRefPred(block->GetFalseEdge());
block->SetKind(BBJ_ALWAYS);
}
+
+ FlowEdge* const edge = m_compiler->fgGetPredForBlock(block->GetTarget(), block);
+ edge->setLikelihood(1.0);
}
}
else
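
A condensed sketch of the fold above: when the GT_JTRUE condition is provably false, the true edge is dropped, the false edge is promoted to the block's target, and the surviving edge becomes certain:

    m_compiler->fgRemoveRefPred(block->GetTrueEdge());              // drop the untaken path
    block->SetKindAndTargetEdge(BBJ_ALWAYS, block->GetFalseEdge()); // keep the taken path
    FlowEdge* const edge = m_compiler->fgGetPredForBlock(block->GetTarget(), block);
    edge->setLikelihood(1.0);                                       // now unconditional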
@@ -1533,9 +1536,9 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
JITDUMP("\nConvert bbKind of " FMT_BB " to BBJ_ALWAYS to bottomBlock " FMT_BB "\n", block->bbNum,
bottomBlock->bbNum);
- block->SetKindAndTarget(BBJ_ALWAYS, bottomBlock);
FlowEdge* const newEdge = fgAddRefPred(bottomBlock, block);
newEdge->setLikelihood(1.0);
+ block->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
if (block == InlineeCompiler->fgLastBB)
{
@@ -1551,11 +1554,12 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
// Insert inlinee's blocks into inliner's block list.
assert(topBlock->KindIs(BBJ_ALWAYS));
assert(topBlock->TargetIs(bottomBlock));
+ fgRemoveRefPred(topBlock->GetTargetEdge());
+ FlowEdge* const newEdge = fgAddRefPred(InlineeCompiler->fgFirstBB, topBlock, topBlock->GetTargetEdge());
+
topBlock->SetNext(InlineeCompiler->fgFirstBB);
- topBlock->SetTarget(topBlock->Next());
+ topBlock->SetTargetEdge(newEdge);
topBlock->SetFlags(BBF_NONE_QUIRK);
- FlowEdge* const oldEdge = fgRemoveRefPred(bottomBlock, topBlock);
- fgAddRefPred(InlineeCompiler->fgFirstBB, topBlock, oldEdge);
InlineeCompiler->fgLastBB->SetNext(bottomBlock);
//
diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp
index 011562ac4730..c68c4818101a 100644
--- a/src/coreclr/jit/fgopt.cpp
+++ b/src/coreclr/jit/fgopt.cpp
@@ -132,7 +132,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock)
block->RemoveFlags(BBF_REMOVED | BBF_INTERNAL);
block->SetFlags(BBF_IMPORTED);
- block->SetKindAndTarget(BBJ_THROW);
+ block->SetKindAndTargetEdge(BBJ_THROW);
block->bbSetRunRarely();
}
else
@@ -623,8 +623,8 @@ PhaseStatus Compiler::fgPostImportationCleanup()
// What follows is similar to fgNewBBInRegion, but we can't call that
// here as the oldTryEntry is no longer in the main bb list.
- newTryEntry = BasicBlock::New(this, BBJ_ALWAYS, tryEntryPrev->Next());
- newTryEntry->SetFlags(BBF_IMPORTED | BBF_INTERNAL | BBF_NONE_QUIRK);
+ newTryEntry = BasicBlock::New(this);
+ newTryEntry->SetFlags(BBF_IMPORTED | BBF_INTERNAL);
newTryEntry->bbRefs = 0;
// Set the right EH region indices on this new block.
@@ -643,12 +643,14 @@ PhaseStatus Compiler::fgPostImportationCleanup()
// plausible flow target. Simplest is to just mark it as a throw.
if (bbIsHandlerBeg(newTryEntry->Next()))
{
- newTryEntry->SetKindAndTarget(BBJ_THROW);
+ newTryEntry->SetKindAndTargetEdge(BBJ_THROW);
}
else
{
FlowEdge* const newEdge = fgAddRefPred(newTryEntry->Next(), newTryEntry);
newEdge->setLikelihood(1.0);
+ newTryEntry->SetFlags(BBF_NONE_QUIRK);
+ newTryEntry->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
}
JITDUMP("OSR: changing start of try region #%u from " FMT_BB " to new " FMT_BB "\n",
@@ -774,7 +776,7 @@ PhaseStatus Compiler::fgPostImportationCleanup()
fromBlock->SetFlags(BBF_INTERNAL);
newBlock->RemoveFlags(BBF_DONT_REMOVE);
addedBlocks++;
- FlowEdge* const normalTryEntryEdge = fgGetPredForBlock(newBlock, fromBlock);
+ FlowEdge* const normalTryEntryEdge = fromBlock->GetTargetEdge();
GenTree* const entryStateLcl = gtNewLclvNode(entryStateVar, TYP_INT);
GenTree* const compareEntryStateToZero =
@@ -782,9 +784,9 @@ PhaseStatus Compiler::fgPostImportationCleanup()
GenTree* const jumpIfEntryStateZero = gtNewOperNode(GT_JTRUE, TYP_VOID, compareEntryStateToZero);
fgNewStmtAtBeg(fromBlock, jumpIfEntryStateZero);
- fromBlock->SetCond(toBlock, newBlock);
FlowEdge* const osrTryEntryEdge = fgAddRefPred(toBlock, fromBlock);
newBlock->inheritWeight(fromBlock);
+ fromBlock->SetCond(osrTryEntryEdge, normalTryEntryEdge);
// Not sure what the correct edge likelihoods are just yet;
// for now we'll say the OSR path is the likely one.
@@ -833,9 +835,9 @@ PhaseStatus Compiler::fgPostImportationCleanup()
if (entryJumpTarget != osrEntry)
{
- fgFirstBB->SetTarget(entryJumpTarget);
- FlowEdge* const oldEdge = fgRemoveRefPred(osrEntry, fgFirstBB);
- fgAddRefPred(entryJumpTarget, fgFirstBB, oldEdge);
+ fgRemoveRefPred(fgFirstBB->GetTargetEdge());
+ FlowEdge* const newEdge = fgAddRefPred(entryJumpTarget, fgFirstBB, fgFirstBB->GetTargetEdge());
+ fgFirstBB->SetTargetEdge(newEdge);
JITDUMP("OSR: redirecting flow from method entry " FMT_BB " to OSR entry " FMT_BB
" via step blocks.\n",
@@ -1006,7 +1008,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext)
noway_assert(block->hasTryIndex() == bNext->hasTryIndex());
JITDUMP("\nCompacting " FMT_BB " into " FMT_BB ":\n", bNext->bbNum, block->bbNum);
- fgRemoveRefPred(bNext, block);
+ fgRemoveRefPred(block->GetTargetEdge());
if (bNext->countOfInEdges() > 0)
{
@@ -1286,24 +1288,31 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext)
case BBJ_ALWAYS:
case BBJ_EHCATCHRET:
case BBJ_EHFILTERRET:
- block->SetKindAndTarget(bNext->GetKind(), bNext->GetTarget());
+ {
+ /* Update the predecessor list for bNext's target */
+ FlowEdge* const targetEdge = bNext->GetTargetEdge();
+ fgReplacePred(targetEdge, block);
- /* Update the predecessor list for 'bNext->bbTarget' */
- fgReplacePred(bNext->GetTarget(), bNext, block);
+ block->SetKindAndTargetEdge(bNext->GetKind(), targetEdge);
break;
+ }
case BBJ_COND:
- block->SetCond(bNext->GetTrueTarget(), bNext->GetFalseTarget());
-
- /* Update the predecessor list for 'bNext->bbTrueTarget' */
- fgReplacePred(bNext->GetTrueTarget(), bNext, block);
+ {
+ /* Update the predecessor list for bNext's true target */
+ FlowEdge* const trueEdge = bNext->GetTrueEdge();
+ FlowEdge* const falseEdge = bNext->GetFalseEdge();
+ fgReplacePred(trueEdge, block);
- /* Update the predecessor list for 'bNext->bbFalseTarget' if it is different than 'bNext->bbTrueTarget' */
- if (!bNext->TrueTargetIs(bNext->GetFalseTarget()))
+ /* Update the predecessor list for bNext's false target if it is different from the true target */
+ if (trueEdge != falseEdge)
{
- fgReplacePred(bNext->GetFalseTarget(), bNext, block);
+ fgReplacePred(falseEdge, block);
}
+
+ block->SetCond(trueEdge, falseEdge);
break;
+ }
case BBJ_EHFINALLYRET:
block->SetEhf(bNext->GetEhfTargets());
@@ -1525,7 +1534,7 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc
bDest->SetFlags(BBF_RUN_RARELY); // Set the RarelyRun flag
}
- FlowEdge* edge2 = fgGetPredForBlock(bDest->GetTarget(), bDest);
+ FlowEdge* edge2 = bDest->GetTargetEdge();
if (edge2 != nullptr)
{
@@ -1561,19 +1570,27 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc
{
case BBJ_ALWAYS:
case BBJ_CALLFINALLYRET:
- block->SetTarget(bDest->GetTarget());
+ {
+ fgRemoveRefPred(block->GetTargetEdge());
+ FlowEdge* const newEdge = fgAddRefPred(bDest->GetTarget(), block, block->GetTargetEdge());
+ block->SetTargetEdge(newEdge);
break;
+ }
case BBJ_COND:
if (block->TrueTargetIs(bDest))
{
assert(!block->FalseTargetIs(bDest));
- block->SetTrueTarget(bDest->GetTarget());
+ fgRemoveRefPred(block->GetTrueEdge());
+ FlowEdge* const trueEdge = fgAddRefPred(bDest->GetTarget(), block, block->GetTrueEdge());
+ block->SetTrueEdge(trueEdge);
}
else
{
assert(block->FalseTargetIs(bDest));
- block->SetFalseTarget(bDest->GetTarget());
+ fgRemoveRefPred(block->GetFalseEdge());
+ FlowEdge* const falseEdge = fgAddRefPred(bDest->GetTarget(), block, block->GetFalseEdge());
+ block->SetFalseEdge(falseEdge);
}
break;
@@ -1581,8 +1598,6 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc
unreached();
}
- fgAddRefPred(bDest->GetTarget(), block, fgRemoveRefPred(bDest, block));
-
return true;
}
return false;
@@ -1642,7 +1657,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block)
else
{
// TODO-NoFallThrough: Once BBJ_COND blocks have pointers to their false branches,
- // allow removing empty BBJ_ALWAYS and pointing bPrev's false branch to block->bbTarget.
+ // allow removing empty BBJ_ALWAYS and pointing bPrev's false branch to block's target.
if (bPrev->bbFallsThrough() && !block->JumpsToNext())
{
break;
@@ -1856,7 +1871,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block)
{
if (fgHaveValidEdgeWeights)
{
- FlowEdge* edge = fgGetPredForBlock(bDest, block);
+ FlowEdge* edge = *jmpTab;
weight_t branchThroughWeight = edge->edgeWeightMin();
if (bDest->bbWeight > branchThroughWeight)
@@ -1872,7 +1887,9 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block)
}
// Update the switch jump table
- FlowEdge* const newEdge = fgAddRefPred(bNewDest, block, fgRemoveRefPred(bDest, block));
+ FlowEdge* const oldEdge = *jmpTab;
+ fgRemoveRefPred(oldEdge);
+ FlowEdge* const newEdge = fgAddRefPred(bNewDest, block, oldEdge);
*jmpTab = newEdge;
// we optimized a Switch label - goto REPEAT_SWITCH to follow this new jump
@@ -1998,10 +2015,10 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block)
}
// Change the switch jump into a BBJ_ALWAYS
- block->SetKindAndTarget(BBJ_ALWAYS, block->GetSwitchTargets()->bbsDstTab[0]->getDestinationBlock());
+ block->SetKindAndTargetEdge(BBJ_ALWAYS, block->GetSwitchTargets()->bbsDstTab[0]);
for (unsigned i = 1; i < jmpCnt; ++i)
{
- fgRemoveRefPred(jmpTab[i]->getDestinationBlock(), block);
+ fgRemoveRefPred(jmpTab[i]);
}
return true;
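
A compact sketch of degenerating a single-target switch under this scheme: the first jump-table edge is promoted to the block's target edge, and every other table entry releases its pred reference:

    block->SetKindAndTargetEdge(BBJ_ALWAYS, block->GetSwitchTargets()->bbsDstTab[0]);
    for (unsigned i = 1; i < jmpCnt; ++i)
    {
        fgRemoveRefPred(jmpTab[i]); // decrement dup count, unlinking at zero
    }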
@@ -2060,9 +2077,9 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block)
fgSetStmtSeq(switchStmt);
}
- BasicBlock* const trueTarget = block->GetSwitchTargets()->bbsDstTab[0]->getDestinationBlock();
- BasicBlock* const falseTarget = block->GetSwitchTargets()->bbsDstTab[1]->getDestinationBlock();
- block->SetCond(trueTarget, falseTarget);
+ FlowEdge* const trueEdge = block->GetSwitchTargets()->bbsDstTab[0];
+ FlowEdge* const falseEdge = block->GetSwitchTargets()->bbsDstTab[1];
+ block->SetCond(trueEdge, falseEdge);
JITDUMP("After:\n");
DISPNODE(switchTree);
@@ -2470,27 +2487,30 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock*
fgInsertStmtAtEnd(block, cloneStmt);
}
- // add an unconditional block after this block to jump to the target block's fallthrough block
+ // Fix up block's flow.
+ // Assume edge likelihoods transfer over.
//
- assert(!target->IsLast());
- BasicBlock* next = fgNewBBafter(BBJ_ALWAYS, block, true, target->GetFalseTarget());
+ fgRemoveRefPred(block->GetTargetEdge());
- // Fix up block's flow
- //
- block->SetCond(target->GetTrueTarget(), next);
- fgAddRefPred(block->GetTrueTarget(), block);
- fgRemoveRefPred(target, block);
+ FlowEdge* const trueEdge = fgAddRefPred(target->GetTrueTarget(), block, target->GetTrueEdge());
+ FlowEdge* const falseEdge = fgAddRefPred(target->GetFalseTarget(), block, target->GetFalseEdge());
+ block->SetCond(trueEdge, falseEdge);
- // The new block 'next' will inherit its weight from 'block'
- //
- next->inheritWeight(block);
- fgAddRefPred(next, block);
- fgAddRefPred(next->GetTarget(), next);
-
- JITDUMP("fgOptimizeUncondBranchToSimpleCond(from " FMT_BB " to cond " FMT_BB "), created new uncond " FMT_BB "\n",
- block->bbNum, target->bbNum, next->bbNum);
+ JITDUMP("fgOptimizeUncondBranchToSimpleCond(from " FMT_BB " to cond " FMT_BB "), modified " FMT_BB "\n",
+ block->bbNum, target->bbNum, block->bbNum);
JITDUMP(" expecting opts to key off V%02u in " FMT_BB "\n", lclNum, block->bbNum);
+ if (target->hasProfileWeight() && block->hasProfileWeight())
+ {
+ // Remove weight from target since block now bypasses it...
+ //
+ weight_t targetWeight = target->bbWeight;
+ weight_t blockWeight = block->bbWeight;
+ target->setBBProfileWeight(max(0, targetWeight - blockWeight));
+ JITDUMP("Decreased " FMT_BB " profile weight from " FMT_WT " to " FMT_WT "\n", target->bbNum, targetWeight,
+ target->bbWeight);
+ }
+
return true;
}
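
To make the weight update concrete with made-up numbers: if target carries profile weight 120 and block carries 80, target is left with max(0, 120 - 80) = 40, since block's executions no longer flow through it. The max(0, ...) guard keeps an inconsistent profile, where block is heavier than target, from producing a negative weight.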
@@ -2504,7 +2524,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock*
void Compiler::fgRemoveConditionalJump(BasicBlock* block)
{
assert(block->KindIs(BBJ_COND));
- assert(block->TrueTargetIs(block->GetFalseTarget()));
+ assert(block->TrueEdgeIs(block->GetFalseEdge()));
BasicBlock* target = block->GetTrueTarget();
@@ -2624,7 +2644,7 @@ void Compiler::fgRemoveConditionalJump(BasicBlock* block)
* block are counted twice so we have to remove one of them */
noway_assert(target->countOfInEdges() > 1);
- fgRemoveRefPred(target, block);
+ fgRemoveRefPred(block->GetTargetEdge());
}
//-------------------------------------------------------------
@@ -2884,21 +2904,21 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
// We need to update the following flags of the bJump block if they were set in the bDest block
bJump->CopyFlags(bDest, BBF_COPY_PROPAGATE);
- bJump->SetCond(bDestNormalTarget, bJump->Next());
-
/* Update bbRefs and bbPreds */
// bJump now falls through into the next block
//
- fgAddRefPred(bJump->GetFalseTarget(), bJump);
+ FlowEdge* const falseEdge = fgAddRefPred(bJump->Next(), bJump);
// bJump no longer jumps to bDest
//
- fgRemoveRefPred(bDest, bJump);
+ fgRemoveRefPred(bJump->GetTargetEdge());
// bJump now jumps to bDest's normal jump target
//
- fgAddRefPred(bDestNormalTarget, bJump);
+ FlowEdge* const trueEdge = fgAddRefPred(bDestNormalTarget, bJump);
+
+ bJump->SetCond(trueEdge, falseEdge);
if (weightJump > 0)
{
@@ -3044,11 +3064,9 @@ bool Compiler::fgOptimizeSwitchJumps()
// Wire up the new control flow.
//
- block->SetCond(dominantTarget, newBlock);
FlowEdge* const blockToTargetEdge = fgAddRefPred(dominantTarget, block);
FlowEdge* const blockToNewBlockEdge = newBlock->bbPreds;
- assert(blockToNewBlockEdge->getSourceBlock() == block);
- assert(blockToTargetEdge->getSourceBlock() == block);
+ block->SetCond(blockToTargetEdge, blockToNewBlockEdge);
// Update profile data
//
@@ -3517,11 +3535,11 @@ bool Compiler::fgReorderBlocks(bool useProfile)
assert(test->OperIsConditionalJump());
test->AsOp()->gtOp1 = gtReverseCond(test->AsOp()->gtOp1);
- BasicBlock* newFalseTarget = block->GetTrueTarget();
- BasicBlock* newTrueTarget = block->GetFalseTarget();
- block->SetTrueTarget(newTrueTarget);
- block->SetFalseTarget(newFalseTarget);
- assert(block->CanRemoveJumpToTarget(newFalseTarget, this));
+ FlowEdge* const newFalseEdge = block->GetTrueEdge();
+ FlowEdge* const newTrueEdge = block->GetFalseEdge();
+ block->SetTrueEdge(newTrueEdge);
+ block->SetFalseEdge(newFalseEdge);
+ assert(block->CanRemoveJumpToTarget(block->GetFalseTarget(), this));
}
else
{
@@ -3653,7 +3671,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
// The edge bPrev -> bDest must have a higher minimum weight
// than every other edge into bDest
//
- FlowEdge* edgeFromPrev = fgGetPredForBlock(bDest, bPrev);
+ FlowEdge* edgeFromPrev = bPrev->GetTargetEdge();
noway_assert(edgeFromPrev != nullptr);
// Examine all of the other edges into bDest
@@ -3745,8 +3763,9 @@ bool Compiler::fgReorderBlocks(bool useProfile)
// V
// bDest ---------------> [BB08, weight 21]
//
- FlowEdge* edgeToDest = fgGetPredForBlock(bDest, bPrev);
- FlowEdge* edgeToBlock = fgGetPredForBlock(block, bPrev);
+ assert(bPrev->FalseTargetIs(block));
+ FlowEdge* edgeToDest = bPrev->GetTrueEdge();
+ FlowEdge* edgeToBlock = bPrev->GetFalseEdge();
noway_assert(edgeToDest != nullptr);
noway_assert(edgeToBlock != nullptr);
//
@@ -4578,10 +4597,10 @@ bool Compiler::fgReorderBlocks(bool useProfile)
noway_assert(condTest->gtOper == GT_JTRUE);
condTest->AsOp()->gtOp1 = gtReverseCond(condTest->AsOp()->gtOp1);
- BasicBlock* trueTarget = bPrev->GetTrueTarget();
- BasicBlock* falseTarget = bPrev->GetFalseTarget();
- bPrev->SetTrueTarget(falseTarget);
- bPrev->SetFalseTarget(trueTarget);
+ FlowEdge* const trueEdge = bPrev->GetTrueEdge();
+ FlowEdge* const falseEdge = bPrev->GetFalseEdge();
+ bPrev->SetTrueEdge(falseEdge);
+ bPrev->SetFalseEdge(trueEdge);
// may need to rethread
//
@@ -4817,13 +4836,11 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication /* = false */, bool isPh
if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, bDest))
{
assert(block->KindIs(BBJ_COND));
- change = true;
- modified = true;
- bDest = block->GetTrueTarget();
- bNext = block->GetFalseTarget();
-
- // TODO-NoFallThrough: Adjust the above logic once bbFalseTarget can diverge from bbNext
- assert(block->NextIs(bNext));
+ assert(bNext == block->Next());
+ change = true;
+ modified = true;
+ bDest = block->GetTrueTarget();
+ bFalseDest = block->GetFalseTarget();
}
}
@@ -4984,13 +5001,15 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication /* = false */, bool isPh
if (bDest->KindIs(BBJ_COND) && !bDest->NextIs(bDest->GetFalseTarget()))
{
BasicBlock* const bDestFalseTarget = bDest->GetFalseTarget();
- BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true, bDestFalseTarget);
- bDest->SetFalseTarget(bFixup);
+ BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true);
bFixup->inheritWeight(bDestFalseTarget);
- fgRemoveRefPred(bDestFalseTarget, bDest);
- fgAddRefPred(bFixup, bDest);
- fgAddRefPred(bDestFalseTarget, bFixup);
+ fgRemoveRefPred(bDest->GetFalseEdge());
+ FlowEdge* const falseEdge = fgAddRefPred(bFixup, bDest);
+ bDest->SetFalseEdge(falseEdge);
+
+ FlowEdge* const newEdge = fgAddRefPred(bDestFalseTarget, bFixup);
+ bFixup->SetTargetEdge(newEdge);
}
}
}
@@ -5018,10 +5037,11 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication /* = false */, bool isPh
}
// Optimize the Conditional JUMP to go to the new target
- block->SetTrueTarget(bNext->GetTarget());
- block->SetFalseTarget(bNext->Next());
-
- fgAddRefPred(bNext->GetTarget(), block, fgRemoveRefPred(bNext->GetTarget(), bNext));
+ fgRemoveRefPred(block->GetFalseEdge());
+ fgRemoveRefPred(bNext->GetTargetEdge());
+ block->SetFalseEdge(block->GetTrueEdge());
+ FlowEdge* const newEdge = fgAddRefPred(bNext->GetTarget(), block, bNext->GetTargetEdge());
+ block->SetTrueEdge(newEdge);
/*
Unlink bNext from the BasicBlock list; note that we can
@@ -5033,7 +5053,6 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication /* = false */, bool isPh
to the final target by the time we're done here.
*/
- fgRemoveRefPred(bNext, block);
fgUnlinkBlockForRemoval(bNext);
/* Mark the block as removed */
@@ -5666,13 +5685,13 @@ PhaseStatus Compiler::fgHeadTailMerge(bool early)
// Fix up the flow.
//
- predBlock->SetKindAndTarget(BBJ_ALWAYS, crossJumpTarget);
-
if (commSucc != nullptr)
{
- fgRemoveRefPred(commSucc, predBlock);
+ fgRemoveRefPred(predBlock->GetTargetEdge());
}
- fgAddRefPred(crossJumpTarget, predBlock);
+
+ FlowEdge* const newEdge = fgAddRefPred(crossJumpTarget, predBlock);
+ predBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
}
// We changed things
@@ -5841,7 +5860,7 @@ bool Compiler::fgTryOneHeadMerge(BasicBlock* block, bool early)
// ternaries in C#).
// The logic below could be generalized to BBJ_SWITCH, but this currently
// has almost no CQ benefit but does have a TP impact.
- if (!block->KindIs(BBJ_COND) || block->TrueTargetIs(block->GetFalseTarget()))
+ if (!block->KindIs(BBJ_COND) || block->TrueEdgeIs(block->GetFalseEdge()))
{
return false;
}
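
The edge comparison is the canonical degenerate-BBJ_COND test in this model: a BBJ_COND whose two outcomes coincide holds a single FlowEdge with a dup count of 2, not two edges to the same block, so comparing edges is both cheaper and more precise than comparing targets. Sketch:

    if (block->KindIs(BBJ_COND) && block->TrueEdgeIs(block->GetFalseEdge()))
    {
        // Both outcomes share one FlowEdge (dup count 2); no real branch here.
    }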
diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp
index 02a4c22d0aad..f3b1534fb6b8 100644
--- a/src/coreclr/jit/fgprofile.cpp
+++ b/src/coreclr/jit/fgprofile.cpp
@@ -507,12 +507,12 @@ void BlockCountInstrumentor::RelocateProbes()
//
if (criticalPreds.Height() > 0)
{
- BasicBlock* const intermediary =
- m_comp->fgNewBBbefore(BBJ_ALWAYS, block, /* extendRegion */ true, /* jumpDest */ block);
+ BasicBlock* const intermediary = m_comp->fgNewBBbefore(BBJ_ALWAYS, block, /* extendRegion */ true);
intermediary->SetFlags(BBF_IMPORTED | BBF_MARKED | BBF_NONE_QUIRK);
intermediary->inheritWeight(block);
FlowEdge* const newEdge = m_comp->fgAddRefPred(block, intermediary);
newEdge->setLikelihood(1.0);
+ intermediary->SetTargetEdge(newEdge);
SetModifiedFlow();
while (criticalPreds.Height() > 0)
@@ -1679,12 +1679,12 @@ void EfficientEdgeCountInstrumentor::RelocateProbes()
//
if (criticalPreds.Height() > 0)
{
- BasicBlock* intermediary =
- m_comp->fgNewBBbefore(BBJ_ALWAYS, block, /* extendRegion */ true, /* jumpDest */ block);
+ BasicBlock* intermediary = m_comp->fgNewBBbefore(BBJ_ALWAYS, block, /* extendRegion */ true);
intermediary->SetFlags(BBF_IMPORTED | BBF_NONE_QUIRK);
intermediary->inheritWeight(block);
FlowEdge* const newEdge = m_comp->fgAddRefPred(block, intermediary);
newEdge->setLikelihood(1.0);
+ intermediary->SetTargetEdge(newEdge);
NewRelocatedProbe(intermediary, probe->source, probe->target, &leader);
SetModifiedFlow();
@@ -1947,7 +1947,7 @@ public:
if (node->IsCall() && node->AsCall()->IsSpecialIntrinsic())
{
const NamedIntrinsic ni = m_compiler->lookupNamedIntrinsic(node->AsCall()->gtCallMethHnd);
- if ((ni == NI_System_Buffer_Memmove) || (ni == NI_System_SpanHelpers_SequenceEqual))
+ if ((ni == NI_System_SpanHelpers_Memmove) || (ni == NI_System_SpanHelpers_SequenceEqual))
{
m_functor(m_compiler, node);
}
@@ -2274,7 +2274,7 @@ public:
return;
}
- assert(node->AsCall()->IsSpecialIntrinsic(compiler, NI_System_Buffer_Memmove) ||
+ assert(node->AsCall()->IsSpecialIntrinsic(compiler, NI_System_SpanHelpers_Memmove) ||
node->AsCall()->IsSpecialIntrinsic(compiler, NI_System_SpanHelpers_SequenceEqual));
const ICorJitInfo::PgoInstrumentationSchema& countEntry = m_schema[*m_currentSchemaIndex];
@@ -2540,10 +2540,13 @@ PhaseStatus Compiler::fgPrepareToInstrumentMethod()
// These are marked as [Intrinsic] only to be handled (unrolled) for constant inputs.
// In other cases they have large managed implementations we want to profile.
case NI_System_String_Equals:
- case NI_System_Buffer_Memmove:
+ case NI_System_SpanHelpers_Memmove:
case NI_System_MemoryExtensions_Equals:
case NI_System_MemoryExtensions_SequenceEqual:
case NI_System_MemoryExtensions_StartsWith:
+ case NI_System_SpanHelpers_Fill:
+ case NI_System_SpanHelpers_SequenceEqual:
+ case NI_System_SpanHelpers_ClearWithoutReferences:
// Same here, these are only folded when JIT knows the exact types
case NI_System_Type_IsAssignableFrom:
@@ -3859,18 +3862,22 @@ void EfficientEdgeCountReconstructor::PropagateOSREntryEdges(BasicBlock* block,
{
// We expect one pseudo-edge and at least one normal edge.
//
- Edge* pseudoEdge = nullptr;
- unsigned nEdges = 0;
+ Edge* pseudoEdge = nullptr;
+ weight_t pseudoEdgeWeight = 0;
+ unsigned nEdges = 0;
+ weight_t successorWeight = BB_ZERO_WEIGHT;
for (Edge* edge = info->m_outgoingEdges; edge != nullptr; edge = edge->m_nextOutgoingEdge)
{
if (edge->m_isPseudoEdge)
{
assert(pseudoEdge == nullptr);
- pseudoEdge = edge;
+ pseudoEdge = edge;
+ pseudoEdgeWeight = edge->m_weight;
continue;
}
+ successorWeight += edge->m_weight;
nEdges++;
}
@@ -3887,28 +3894,25 @@ void EfficientEdgeCountReconstructor::PropagateOSREntryEdges(BasicBlock* block,
assert(nEdges == nSucc);
- if (info->m_weight == BB_ZERO_WEIGHT)
+ if ((info->m_weight == BB_ZERO_WEIGHT) || (successorWeight == BB_ZERO_WEIGHT))
{
- JITDUMP("\nPropagate: OSR entry block weight is zero\n");
+ JITDUMP("\nPropagate: OSR entry block or successor weight is zero\n");
EntryWeightZero();
return;
}
// Transfer model edge weight onto the FlowEdges as likelihoods.
//
- assert(nEdges == nSucc);
- weight_t totalLikelihood = 0;
+ JITDUMP("Normalizing OSR successor likelihoods with factor 1/" FMT_WT "\n", successorWeight);
for (Edge* edge = info->m_outgoingEdges; edge != nullptr; edge = edge->m_nextOutgoingEdge)
{
assert(block == edge->m_sourceBlock);
- // The pseudo edge doesn't correspond to a flow edge,
- // but it carries away some flow.
+ // The pseudo edge doesn't correspond to a flow edge.
//
if (edge == pseudoEdge)
{
- totalLikelihood += edge->m_weight / info->m_weight;
continue;
}
@@ -3924,51 +3928,19 @@ void EfficientEdgeCountReconstructor::PropagateOSREntryEdges(BasicBlock* block,
if (nEdges == 1)
{
- // Conceptually we could assert(edge->m_weight == info->m_weight);
- // but we can have inconsistencies.
- //
// Go with what we know for sure, edge should be 100% likely.
//
likelihood = 1.0;
JITDUMP("Setting likelihood of " FMT_BB " -> " FMT_BB " to " FMT_WT " (uniq)\n", block->bbNum,
edge->m_targetBlock->bbNum, likelihood);
flowEdge->setLikelihood(likelihood);
- totalLikelihood += likelihood;
break;
}
- assert(info->m_weight != BB_ZERO_WEIGHT);
-
- // We may see nonsensical weights here, cap likelihood.
- //
- bool capped = false;
- if (edge->m_weight > info->m_weight)
- {
- capped = true;
- likelihood = 1.0;
- }
- else
- {
- likelihood = edge->m_weight / info->m_weight;
- }
- JITDUMP("Setting likelihood of " FMT_BB " -> " FMT_BB " to " FMT_WT " (%s)\n", block->bbNum,
- edge->m_targetBlock->bbNum, likelihood, capped ? "pgo -- capped" : "pgo");
+ likelihood = edge->m_weight / successorWeight;
+ JITDUMP("Setting likelihood of " FMT_BB " -> " FMT_BB " to " FMT_WT " (pgo)\n", block->bbNum,
+ edge->m_targetBlock->bbNum, likelihood);
flowEdge->setLikelihood(likelihood);
- totalLikelihood += likelihood;
- }
-
- // Note we expect real flow imbalances here as it's likely there
- // was no observed flow from the OSR entry to some of its successors.
- // Since we added in the pseudo edge likelihood above, the check below
- // probably won't flag this.
- //
- // Seems like for OSR we will always want to run synthesis/repair.
- //
- if (totalLikelihood != 1.0)
- {
- // Consider what to do here... flag this method as needing immediate profile repairs?
- //
- JITDUMP(FMT_BB " total outgoing likelihood inaccurate: " FMT_WT "\n", block->bbNum, totalLikelihood);
}
}
@@ -3981,10 +3953,6 @@ void EfficientEdgeCountReconstructor::PropagateOSREntryEdges(BasicBlock* block,
// info - model info for the block
// nSucc - number of successors of the block in the flow graph
//
-// Notes:
-// This block requires special handling because original method flow
-// was interrupted here.
-//
void EfficientEdgeCountReconstructor::PropagateEdges(BasicBlock* block, BlockInfo* info, unsigned nSucc)
{
// There is at least one FlowEdge.
@@ -3992,8 +3960,9 @@ void EfficientEdgeCountReconstructor::PropagateEdges(BasicBlock* block, BlockInf
// Check the reconstruction graph edges. For normal blocks, if we have
// any pseudo-edges there should be only one pseudo-edge, and no regular edges.
//
- Edge* pseudoEdge = nullptr;
- unsigned nEdges = 0;
+ Edge* pseudoEdge = nullptr;
+ unsigned nEdges = 0;
+ weight_t successorWeight = BB_ZERO_WEIGHT;
for (Edge* edge = info->m_outgoingEdges; edge != nullptr; edge = edge->m_nextOutgoingEdge)
{
@@ -4004,6 +3973,7 @@ void EfficientEdgeCountReconstructor::PropagateEdges(BasicBlock* block, BlockInf
continue;
}
+ successorWeight += edge->m_weight;
nEdges++;
}
@@ -4020,7 +3990,7 @@ void EfficientEdgeCountReconstructor::PropagateEdges(BasicBlock* block, BlockInf
assert(nSucc == 1);
assert(block == pseudoEdge->m_sourceBlock);
assert(block->HasInitializedTarget());
- FlowEdge* const flowEdge = m_comp->fgGetPredForBlock(block->GetTarget(), block);
+ FlowEdge* const flowEdge = block->GetTargetEdge();
assert(flowEdge != nullptr);
flowEdge->setLikelihood(1.0);
return;
@@ -4030,7 +4000,7 @@ void EfficientEdgeCountReconstructor::PropagateEdges(BasicBlock* block, BlockInf
//
// This can happen because some BBJ_LEAVE blocks may have been missed during
// our spanning tree walk since we don't know where all the finallies can return
- // to just yet (specially, in WalkSpanningTree, we may not add the bbTarget of
+ // to just yet (specifically, in WalkSpanningTree, we may not add the target of
// a BBJ_LEAVE to the worklist).
//
// Worst case those missed blocks dominate other blocks so we can't limit
@@ -4043,19 +4013,19 @@ void EfficientEdgeCountReconstructor::PropagateEdges(BasicBlock* block, BlockInf
//
// (TODO: use synthesis here)
//
- if ((nEdges != nSucc) || (info->m_weight == BB_ZERO_WEIGHT))
+ if ((nEdges != nSucc) || (info->m_weight == BB_ZERO_WEIGHT) || (successorWeight == BB_ZERO_WEIGHT))
{
JITDUMP(FMT_BB " %s , setting outgoing likelihoods heuristically\n", block->bbNum,
(nEdges != nSucc) ? "has inaccurate flow model" : "has zero weight");
weight_t equalLikelihood = 1.0 / nSucc;
- for (BasicBlock* succ : block->Succs(m_comp))
+ for (FlowEdge* const succEdge : block->SuccEdges(m_comp))
{
- FlowEdge* const flowEdge = m_comp->fgGetPredForBlock(succ, block);
- JITDUMP("Setting likelihood of " FMT_BB " -> " FMT_BB " to " FMT_WT " (heur)\n", block->bbNum, succ->bbNum,
- equalLikelihood);
- flowEdge->setLikelihood(equalLikelihood);
+ BasicBlock* const succBlock = succEdge->getDestinationBlock();
+ JITDUMP("Setting likelihood of " FMT_BB " -> " FMT_BB " to " FMT_WT " (heur)\n", block->bbNum,
+ succBlock->bbNum, equalLikelihood);
+ succEdge->setLikelihood(equalLikelihood);
}
return;
@@ -4064,7 +4034,7 @@ void EfficientEdgeCountReconstructor::PropagateEdges(BasicBlock* block, BlockInf
// Transfer model edge weight onto the FlowEdges as likelihoods.
//
assert(nEdges == nSucc);
- weight_t totalLikelihood = 0;
+ JITDUMP("Normalizing successor likelihoods with factor 1/" FMT_WT "\n", successorWeight);
for (Edge* edge = info->m_outgoingEdges; edge != nullptr; edge = edge->m_nextOutgoingEdge)
{
@@ -4076,45 +4046,17 @@ void EfficientEdgeCountReconstructor::PropagateEdges(BasicBlock* block, BlockInf
if (nEdges == 1)
{
assert(nSucc == 1);
-
- // Conceptually we could assert(edge->m_weight == info->m_weight);
- // but we can have inconsistencies.
- //
- // Go with what we know for sure, edge should be 100% likely.
- //
likelihood = 1.0;
JITDUMP("Setting likelihood of " FMT_BB " -> " FMT_BB " to " FMT_WT " (uniq)\n", block->bbNum,
edge->m_targetBlock->bbNum, likelihood);
flowEdge->setLikelihood(likelihood);
- totalLikelihood += likelihood;
break;
}
- assert(info->m_weight != BB_ZERO_WEIGHT);
-
- // We may see nonsensical weights here, cap likelihood.
- //
- bool capped = false;
- if (edge->m_weight > info->m_weight)
- {
- capped = true;
- likelihood = 1.0;
- }
- else
- {
- likelihood = edge->m_weight / info->m_weight;
- }
- JITDUMP("Setting likelihood of " FMT_BB " -> " FMT_BB " to " FMT_WT " (%s)\n", block->bbNum,
- edge->m_targetBlock->bbNum, likelihood, capped ? "pgo -- capped" : "pgo");
+ likelihood = edge->m_weight / successorWeight;
+ JITDUMP("Setting likelihood of " FMT_BB " -> " FMT_BB " to " FMT_WT " (pgo)\n", block->bbNum,
+ edge->m_targetBlock->bbNum, likelihood);
flowEdge->setLikelihood(likelihood);
- totalLikelihood += likelihood;
- }
-
- if (totalLikelihood != 1.0)
- {
- // Consider what to do here... flag this method as needing immediate profile repairs?
- //
- JITDUMP(FMT_BB " total outgoing likelihood inaccurate: " FMT_WT "\n", block->bbNum, totalLikelihood);
}
}
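
A worked example of the new normalization, with hypothetical counts: suppose the block's own weight is 100 and it has two real outgoing edges weighing 30 and 50, with a pseudo-edge carrying the remaining 20. Dividing by successorWeight = 80 gives likelihoods 30/80 = 0.375 and 50/80 = 0.625, which sum to 1.0; the old division by the block weight would have left 0.3 + 0.5 = 0.8 and tripped the likelihood-sum check added elsewhere in this patch.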
@@ -4975,13 +4917,13 @@ PhaseStatus Compiler::fgComputeEdgeWeights()
BasicBlock* otherDst;
if (bSrc->FalseTargetIs(bDst))
{
- otherDst = bSrc->GetTrueTarget();
+ otherEdge = bSrc->GetTrueEdge();
}
else
{
- otherDst = bSrc->GetFalseTarget();
+ otherEdge = bSrc->GetFalseEdge();
}
- otherEdge = fgGetPredForBlock(otherDst, bSrc);
+ otherDst = otherEdge->getDestinationBlock();
// If we see min/max violations, just give up on the computations
//
@@ -5293,7 +5235,8 @@ void Compiler::fgDebugCheckProfileWeights()
}
else
{
- ProfileChecks checks = ProfileChecks::CHECK_HASLIKELIHOOD | ProfileChecks::RAISE_ASSERT;
+ ProfileChecks checks =
+ ProfileChecks::CHECK_HASLIKELIHOOD | ProfileChecks::CHECK_LIKELIHOODSUM | ProfileChecks::RAISE_ASSERT;
fgDebugCheckProfileWeights(checks);
}
}
@@ -5325,6 +5268,7 @@ void Compiler::fgDebugCheckProfileWeights(ProfileChecks checks)
const bool verifyClassicWeights = fgEdgeWeightsComputed && hasFlag(checks, ProfileChecks::CHECK_CLASSIC);
const bool verifyLikelyWeights = hasFlag(checks, ProfileChecks::CHECK_LIKELY);
const bool verifyHasLikelihood = hasFlag(checks, ProfileChecks::CHECK_HASLIKELIHOOD);
+ const bool verifyLikelihoodSum = hasFlag(checks, ProfileChecks::CHECK_LIKELIHOODSUM);
const bool assertOnFailure = hasFlag(checks, ProfileChecks::RAISE_ASSERT);
const bool checkAllBlocks = hasFlag(checks, ProfileChecks::CHECK_ALL_BLOCKS);
@@ -5475,6 +5419,10 @@ void Compiler::fgDebugCheckProfileWeights(ProfileChecks checks)
JITDUMP("Profile is self-consistent (%d profiled blocks, %d unprofiled)\n", profiledBlocks,
unprofiledBlocks);
}
+ else if (verifyLikelihoodSum)
+ {
+ JITDUMP("All block successor flow edge likelihoods sum to 1.0\n");
+ }
else if (verifyHasLikelihood)
{
JITDUMP("All flow edges have likelihoods\n");
@@ -5615,10 +5563,10 @@ bool Compiler::fgDebugCheckIncomingProfileData(BasicBlock* block, ProfileChecks
bool Compiler::fgDebugCheckOutgoingProfileData(BasicBlock* block, ProfileChecks checks)
{
const bool verifyClassicWeights = fgEdgeWeightsComputed && hasFlag(checks, ProfileChecks::CHECK_CLASSIC);
- const bool verifyLikelyWeights = hasFlag(checks, ProfileChecks::CHECK_LIKELY);
const bool verifyHasLikelihood = hasFlag(checks, ProfileChecks::CHECK_HASLIKELIHOOD);
+ const bool verifyLikelihoodSum = hasFlag(checks, ProfileChecks::CHECK_LIKELIHOODSUM);
- if (!(verifyClassicWeights || verifyLikelyWeights || verifyHasLikelihood))
+ if (!(verifyClassicWeights || verifyHasLikelihood || verifyLikelihoodSum))
{
return true;
}
@@ -5642,17 +5590,10 @@ bool Compiler::fgDebugCheckOutgoingProfileData(BasicBlock* block, ProfileChecks
unsigned missingEdges = 0;
unsigned missingLikelihood = 0;
- for (unsigned i = 0; i < numSuccs; i++)
+ for (FlowEdge* succEdge : block->SuccEdges(this))
{
- BasicBlock* succBlock = block->GetSucc(i, this);
- FlowEdge* succEdge = fgGetPredForBlock(succBlock, block);
-
- if (succEdge == nullptr)
- {
- missingEdges++;
- JITDUMP(" " FMT_BB " can't find successor edge to " FMT_BB "\n", block->bbNum, succBlock->bbNum);
- continue;
- }
+ assert(succEdge != nullptr);
+ BasicBlock* succBlock = succEdge->getDestinationBlock();
outgoingWeightMin += succEdge->edgeWeightMin();
outgoingWeightMax += succEdge->edgeWeightMax();
@@ -5708,7 +5649,7 @@ bool Compiler::fgDebugCheckOutgoingProfileData(BasicBlock* block, ProfileChecks
}
}
- if (verifyLikelyWeights)
+ if (verifyLikelihoodSum)
{
if (!fgProfileWeightsConsistent(outgoingLikelihood, 1.0))
{
diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp
index e315e33015e1..a8d16a5b7d24 100644
--- a/src/coreclr/jit/fgprofilesynthesis.cpp
+++ b/src/coreclr/jit/fgprofilesynthesis.cpp
@@ -11,7 +11,6 @@
// TODO
//
-// * faster way of doing fgGetPredForBlock
// * vet against some real data
// * IR based heuristics (perhaps)
// * During Cp, avoid repeatedly propagating through nested loops
@@ -142,14 +141,6 @@ void ProfileSynthesis::AssignLikelihoods()
break;
case BBJ_CALLFINALLY:
- // Single successor next cases
- //
- // Note we handle flow to the finally
- // specially; this represents return
- // from the finally.
- AssignLikelihoodNext(block);
- break;
-
case BBJ_ALWAYS:
case BBJ_CALLFINALLYRET:
case BBJ_LEAVE:
@@ -176,28 +167,15 @@ void ProfileSynthesis::AssignLikelihoods()
}
//------------------------------------------------------------------------
-// AssignLikelihoodNext: update edge likelihood for block that always
-// transfers control to bbNext
-//
-// Arguments;
-// block -- block in question
-//
-void ProfileSynthesis::AssignLikelihoodNext(BasicBlock* block)
-{
- FlowEdge* const edge = m_comp->fgGetPredForBlock(block->Next(), block);
- edge->setLikelihood(1.0);
-}
-
-//------------------------------------------------------------------------
// AssignLikelihoodJump: update edge likelihood for a block that always
-// transfers control to bbTarget
+// transfers control to its target block
//
// Arguments:
// block -- block in question
//
void ProfileSynthesis::AssignLikelihoodJump(BasicBlock* block)
{
- FlowEdge* const edge = m_comp->fgGetPredForBlock(block->GetTarget(), block);
+ FlowEdge* const edge = block->GetTargetEdge();
edge->setLikelihood(1.0);
}
@@ -210,36 +188,37 @@ void ProfileSynthesis::AssignLikelihoodJump(BasicBlock* block)
//
void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block)
{
- BasicBlock* const jump = block->GetTrueTarget();
- BasicBlock* const next = block->GetFalseTarget();
+ FlowEdge* const trueEdge = block->GetTrueEdge();
+ FlowEdge* const falseEdge = block->GetFalseEdge();
// Watch for degenerate case
//
- if (jump == next)
+ if (trueEdge == falseEdge)
{
- AssignLikelihoodNext(block);
+ assert(trueEdge->getDupCount() == 2);
+ trueEdge->setLikelihood(1.0);
return;
}
- FlowEdge* const jumpEdge = m_comp->fgGetPredForBlock(jump, block);
- FlowEdge* const nextEdge = m_comp->fgGetPredForBlock(next, block);
+ BasicBlock* trueTarget = trueEdge->getDestinationBlock();
+ BasicBlock* falseTarget = falseEdge->getDestinationBlock();
// THROW heuristic
//
- bool const isJumpThrow = jump->KindIs(BBJ_THROW);
- bool const isNextThrow = next->KindIs(BBJ_THROW);
+ bool const isTrueThrow = trueTarget->KindIs(BBJ_THROW);
+ bool const isFalseThrow = falseTarget->KindIs(BBJ_THROW);
- if (isJumpThrow != isNextThrow)
+ if (isTrueThrow != isFalseThrow)
{
- if (isJumpThrow)
+ if (isTrueThrow)
{
- jumpEdge->setLikelihood(0.0);
- nextEdge->setLikelihood(1.0);
+ trueEdge->setLikelihood(0.0);
+ falseEdge->setLikelihood(1.0);
}
else
{
- jumpEdge->setLikelihood(1.0);
- nextEdge->setLikelihood(0.0);
+ trueEdge->setLikelihood(1.0);
+ falseEdge->setLikelihood(0.0);
}
return;
@@ -247,22 +226,22 @@ void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block)
// LOOP BACK EDGE heuristic
//
- bool const isJumpEdgeBackEdge = m_loops->IsLoopBackEdge(jumpEdge);
- bool const isNextEdgeBackEdge = m_loops->IsLoopBackEdge(nextEdge);
+ bool const isTrueEdgeBackEdge = m_loops->IsLoopBackEdge(trueEdge);
+ bool const isFalseEdgeBackEdge = m_loops->IsLoopBackEdge(falseEdge);
- if (isJumpEdgeBackEdge != isNextEdgeBackEdge)
+ if (isTrueEdgeBackEdge != isFalseEdgeBackEdge)
{
- if (isJumpEdgeBackEdge)
+ if (isTrueEdgeBackEdge)
{
- JITDUMP(FMT_BB "->" FMT_BB " is loop back edge\n", block->bbNum, jump->bbNum);
- jumpEdge->setLikelihood(loopBackLikelihood);
- nextEdge->setLikelihood(1.0 - loopBackLikelihood);
+ JITDUMP(FMT_BB "->" FMT_BB " is loop back edge\n", block->bbNum, trueTarget->bbNum);
+ trueEdge->setLikelihood(loopBackLikelihood);
+ falseEdge->setLikelihood(1.0 - loopBackLikelihood);
}
else
{
- JITDUMP(FMT_BB "->" FMT_BB " is loop back edge\n", block->bbNum, next->bbNum);
- jumpEdge->setLikelihood(1.0 - loopBackLikelihood);
- nextEdge->setLikelihood(loopBackLikelihood);
+ JITDUMP(FMT_BB "->" FMT_BB " is loop back edge\n", block->bbNum, falseTarget->bbNum);
+ trueEdge->setLikelihood(1.0 - loopBackLikelihood);
+ falseEdge->setLikelihood(loopBackLikelihood);
}
return;
@@ -273,22 +252,22 @@ void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block)
// Consider: adjust probability if loop has multiple exit edges, so that
// overall exit probability is around 0.1.
//
- bool const isJumpEdgeExitEdge = m_loops->IsLoopExitEdge(jumpEdge);
- bool const isNextEdgeExitEdge = m_loops->IsLoopExitEdge(nextEdge);
+ bool const isTrueEdgeExitEdge = m_loops->IsLoopExitEdge(trueEdge);
+ bool const isFalseEdgeExitEdge = m_loops->IsLoopExitEdge(falseEdge);
- if (isJumpEdgeExitEdge != isNextEdgeExitEdge)
+ if (isTrueEdgeExitEdge != isFalseEdgeExitEdge)
{
- if (isJumpEdgeExitEdge)
+ if (isTrueEdgeExitEdge)
{
- JITDUMP(FMT_BB "->" FMT_BB " is loop exit edge\n", block->bbNum, jump->bbNum);
- jumpEdge->setLikelihood(1.0 - loopExitLikelihood);
- nextEdge->setLikelihood(loopExitLikelihood);
+ JITDUMP(FMT_BB "->" FMT_BB " is loop exit edge\n", block->bbNum, trueTarget->bbNum);
+ trueEdge->setLikelihood(1.0 - loopExitLikelihood);
+ falseEdge->setLikelihood(loopExitLikelihood);
}
else
{
- JITDUMP(FMT_BB "->" FMT_BB " is loop exit edge\n", block->bbNum, next->bbNum);
- jumpEdge->setLikelihood(loopExitLikelihood);
- nextEdge->setLikelihood(1.0 - loopExitLikelihood);
+ JITDUMP(FMT_BB "->" FMT_BB " is loop exit edge\n", block->bbNum, falseTarget->bbNum);
+ trueEdge->setLikelihood(loopExitLikelihood);
+ falseEdge->setLikelihood(1.0 - loopExitLikelihood);
}
return;
@@ -296,20 +275,20 @@ void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block)
// RETURN heuristic
//
- bool const isJumpReturn = jump->KindIs(BBJ_RETURN);
- bool const isNextReturn = next->KindIs(BBJ_RETURN);
+ bool const isJumpReturn = trueTarget->KindIs(BBJ_RETURN);
+ bool const isNextReturn = falseTarget->KindIs(BBJ_RETURN);
if (isJumpReturn != isNextReturn)
{
if (isJumpReturn)
{
- jumpEdge->setLikelihood(returnLikelihood);
- nextEdge->setLikelihood(1.0 - returnLikelihood);
+ trueEdge->setLikelihood(returnLikelihood);
+ falseEdge->setLikelihood(1.0 - returnLikelihood);
}
else
{
- jumpEdge->setLikelihood(1.0 - returnLikelihood);
- nextEdge->setLikelihood(returnLikelihood);
+ trueEdge->setLikelihood(1.0 - returnLikelihood);
+ falseEdge->setLikelihood(returnLikelihood);
}
return;
@@ -319,8 +298,8 @@ void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block)
//
// Give slight preference to bbNext
//
- jumpEdge->setLikelihood(1.0 - ilNextLikelihood);
- nextEdge->setLikelihood(ilNextLikelihood);
+ trueEdge->setLikelihood(1.0 - ilNextLikelihood);
+ falseEdge->setLikelihood(ilNextLikelihood);
}
//------------------------------------------------------------------------
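The rewritten AssignLikelihoodCond above applies a fixed priority chain of heuristics (throw, loop back edge, loop exit, return, IL fall-through), each assigning complementary likelihoods to the true/false edges. A compilable sketch of that shape; the numeric constants here are placeholders, not the JIT's tuned values:

    #include <cassert>
    #include <utility>

    // Placeholder values; the real constants live in fgprofilesynthesis.h.
    const double loopBackLikelihood = 0.9;
    const double loopExitLikelihood = 0.9;
    const double returnLikelihood   = 0.2;
    const double ilNextLikelihood   = 0.52;

    struct SuccTraits
    {
        bool isThrow, isLoopBack, isLoopExit, isReturn;
    };

    // Returns (trueLikelihood, falseLikelihood); the pair always sums to 1.0.
    std::pair<double, double> assignCondLikelihoods(SuccTraits t, SuccTraits f)
    {
        if (t.isThrow != f.isThrow) // THROW heuristic
        {
            return t.isThrow ? std::make_pair(0.0, 1.0) : std::make_pair(1.0, 0.0);
        }
        if (t.isLoopBack != f.isLoopBack) // LOOP BACK EDGE heuristic
        {
            return t.isLoopBack ? std::make_pair(loopBackLikelihood, 1.0 - loopBackLikelihood)
                                : std::make_pair(1.0 - loopBackLikelihood, loopBackLikelihood);
        }
        if (t.isLoopExit != f.isLoopExit) // LOOP EXIT heuristic: exiting is the unlikely side
        {
            return t.isLoopExit ? std::make_pair(1.0 - loopExitLikelihood, loopExitLikelihood)
                                : std::make_pair(loopExitLikelihood, 1.0 - loopExitLikelihood);
        }
        if (t.isReturn != f.isReturn) // RETURN heuristic
        {
            return t.isReturn ? std::make_pair(returnLikelihood, 1.0 - returnLikelihood)
                              : std::make_pair(1.0 - returnLikelihood, returnLikelihood);
        }
        // Default: slight preference for the fall-through (false) edge.
        return std::make_pair(1.0 - ilNextLikelihood, ilNextLikelihood);
    }

    int main()
    {
        SuccTraits toThrow = {true, false, false, false};
        SuccTraits normal  = {false, false, false, false};
        assert(assignCondLikelihoods(toThrow, normal).first == 0.0);
        return 0;
    }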
@@ -342,10 +321,9 @@ void ProfileSynthesis::AssignLikelihoodSwitch(BasicBlock* block)
// Each unique edge gets some multiple of that basic probability
//
- for (BasicBlock* const succ : block->Succs(m_comp))
+ for (FlowEdge* const succEdge : block->SuccEdges(m_comp))
{
- FlowEdge* const edge = m_comp->fgGetPredForBlock(succ, block);
- edge->setLikelihood(p * edge->getDupCount());
+ succEdge->setLikelihood(p * succEdge->getDupCount());
}
}
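AssignLikelihoodSwitch now iterates successor edges directly, but the distribution itself is unchanged: given a base per-case probability p, every unique successor edge receives p times its duplicate count, so the total across unique edges stays at 1.0. A small sketch, assuming p = 1/numCases:

    #include <cassert>
    #include <cmath>
    #include <vector>

    int main()
    {
        // A 5-case switch where three cases share one successor block.
        const int numCases = 5;
        const double p = 1.0 / numCases;
        std::vector<int> dupCounts = {3, 1, 1}; // per unique successor edge

        double sum = 0.0;
        for (int dup : dupCounts)
        {
            sum += p * dup; // likelihood assigned to that unique edge
        }
        assert(std::fabs(sum - 1.0) < 1e-9);
        return 0;
    }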
@@ -368,10 +346,9 @@ weight_t ProfileSynthesis::SumOutgoingLikelihoods(BasicBlock* block, WeightVecto
likelihoods->clear();
}
- for (BasicBlock* const succ : block->Succs(m_comp))
+ for (FlowEdge* const succEdge : block->SuccEdges(m_comp))
{
- FlowEdge* const edge = m_comp->fgGetPredForBlock(succ, block);
- weight_t likelihood = edge->getLikelihood();
+ weight_t likelihood = succEdge->getLikelihood();
if (likelihoods != nullptr)
{
likelihoods->push_back(likelihood);
@@ -406,11 +383,6 @@ void ProfileSynthesis::RepairLikelihoods()
break;
case BBJ_CALLFINALLY:
- // Single successor next cases.
- // Just assign 1.0
- AssignLikelihoodNext(block);
- break;
-
case BBJ_ALWAYS:
case BBJ_CALLFINALLYRET:
case BBJ_LEAVE:
@@ -498,11 +470,6 @@ void ProfileSynthesis::BlendLikelihoods()
break;
case BBJ_CALLFINALLY:
- // Single successor next cases.
- // Just assign 1.0
- AssignLikelihoodNext(block);
- break;
-
case BBJ_ALWAYS:
case BBJ_CALLFINALLYRET:
case BBJ_LEAVE:
@@ -560,15 +527,15 @@ void ProfileSynthesis::BlendLikelihoods()
JITDUMP("Blending likelihoods in " FMT_BB " with blend factor " FMT_WT " \n", block->bbNum,
blendFactor);
iter = likelihoods.begin();
- for (BasicBlock* const succ : block->Succs(m_comp))
+ for (FlowEdge* const succEdge : block->SuccEdges(m_comp))
{
- FlowEdge* const edge = m_comp->fgGetPredForBlock(succ, block);
- weight_t newLikelihood = edge->getLikelihood();
- weight_t oldLikelihood = *iter;
+ weight_t newLikelihood = succEdge->getLikelihood();
+ weight_t oldLikelihood = *iter;
- edge->setLikelihood((blendFactor * oldLikelihood) + ((1.0 - blendFactor) * newLikelihood));
- JITDUMP(FMT_BB " -> " FMT_BB " was " FMT_WT " now " FMT_WT "\n", block->bbNum, succ->bbNum,
- oldLikelihood, edge->getLikelihood());
+ succEdge->setLikelihood((blendFactor * oldLikelihood) + ((1.0 - blendFactor) * newLikelihood));
+ BasicBlock* const succBlock = succEdge->getDestinationBlock();
+ JITDUMP(FMT_BB " -> " FMT_BB " was " FMT_WT " now " FMT_WT "\n", block->bbNum, succBlock->bbNum,
+ oldLikelihood, succEdge->getLikelihood());
iter++;
}
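The blend above is a convex combination of the old and newly synthesized likelihoods, which is what preserves the sum-to-1 invariant when both inputs already satisfy it. A standalone sketch (the blend factor chosen here is arbitrary):

    #include <cassert>
    #include <cmath>
    #include <cstddef>
    #include <vector>

    // out[i] = factor * old[i] + (1 - factor) * new[i]; if both vectors
    // sum to 1.0, so does the result, for any factor in [0, 1].
    std::vector<double> blend(const std::vector<double>& oldL, const std::vector<double>& newL, double factor)
    {
        std::vector<double> out(oldL.size());
        for (std::size_t i = 0; i < oldL.size(); i++)
        {
            out[i] = factor * oldL[i] + (1.0 - factor) * newL[i];
        }
        return out;
    }

    int main()
    {
        std::vector<double> b = blend({0.5, 0.5}, {0.9, 0.1}, 0.25);
        assert(std::fabs(b[0] + b[1] - 1.0) < 1e-9);
        return 0;
    }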
@@ -588,10 +555,9 @@ void ProfileSynthesis::ClearLikelihoods()
{
for (BasicBlock* const block : m_comp->Blocks())
{
- for (BasicBlock* const succ : block->Succs(m_comp))
+ for (FlowEdge* const succEdge : block->SuccEdges(m_comp))
{
- FlowEdge* const edge = m_comp->fgGetPredForBlock(succ, block);
- edge->clearLikelihood();
+ succEdge->clearLikelihood();
}
}
}
@@ -664,10 +630,9 @@ void ProfileSynthesis::RandomizeLikelihoods()
}
i = 0;
- for (BasicBlock* const succ : block->Succs(m_comp))
+ for (FlowEdge* const succEdge : block->SuccEdges(m_comp))
{
- FlowEdge* const edge = m_comp->fgGetPredForBlock(succ, block);
- edge->setLikelihood(likelihoods[i++] / sum);
+ succEdge->setLikelihood(likelihoods[i++] / sum);
}
}
#endif // DEBUG
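RandomizeLikelihoods (a DEBUG-only stress mode) draws arbitrary positive weights and then normalizes by their sum, as in the likelihoods[i++] / sum expression above. The normalization step in isolation:

    #include <cassert>
    #include <cmath>
    #include <random>
    #include <vector>

    int main()
    {
        std::mt19937 rng(42);
        std::uniform_real_distribution<double> dist(0.05, 1.0);

        // Draw a raw weight per successor edge, remembering the total.
        std::vector<double> likelihoods(4);
        double sum = 0.0;
        for (double& l : likelihoods)
        {
            l = dist(rng);
            sum += l;
        }

        // Dividing each weight by the total restores the sum-to-1 invariant.
        double total = 0.0;
        for (double& l : likelihoods)
        {
            l /= sum;
            total += l;
        }
        assert(std::fabs(total - 1.0) < 1e-9);
        return 0;
    }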
@@ -859,28 +824,26 @@ void ProfileSynthesis::ComputeCyclicProbabilities(FlowGraphNaturalLoop* loop)
" to reflect capping; current likelihood is " FMT_WT "\n",
exitBlock->bbNum, exitEdge->getLikelihood());
- BasicBlock* const jump = exitBlock->GetTrueTarget();
- BasicBlock* const next = exitBlock->GetFalseTarget();
- FlowEdge* const jumpEdge = m_comp->fgGetPredForBlock(jump, exitBlock);
- FlowEdge* const nextEdge = m_comp->fgGetPredForBlock(next, exitBlock);
- weight_t const exitLikelihood = (missingExitWeight + currentExitWeight) / exitBlockWeight;
- weight_t const continueLikelihood = 1.0 - exitLikelihood;
+ FlowEdge* const trueEdge = exitBlock->GetTrueEdge();
+ FlowEdge* const falseEdge = exitBlock->GetFalseEdge();
+ weight_t const exitLikelihood = (missingExitWeight + currentExitWeight) / exitBlockWeight;
+ weight_t const continueLikelihood = 1.0 - exitLikelihood;
// We are making it more likely that the loop exits, so the new exit likelihood
// should be greater than the old.
//
assert(exitLikelihood > exitEdge->getLikelihood());
- if (jumpEdge == exitEdge)
+ if (trueEdge == exitEdge)
{
- jumpEdge->setLikelihood(exitLikelihood);
- nextEdge->setLikelihood(continueLikelihood);
+ trueEdge->setLikelihood(exitLikelihood);
+ falseEdge->setLikelihood(continueLikelihood);
}
else
{
- assert(nextEdge == exitEdge);
- jumpEdge->setLikelihood(continueLikelihood);
- nextEdge->setLikelihood(exitLikelihood);
+ assert(falseEdge == exitEdge);
+ trueEdge->setLikelihood(continueLikelihood);
+ falseEdge->setLikelihood(exitLikelihood);
}
adjustedExit = true;
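The capping fix-up above raises the exit edge's likelihood and gives the complement to the other edge of the exiting BBJ_COND block. The arithmetic in isolation, with made-up weights:

    #include <cassert>
    #include <cmath>

    int main()
    {
        // Illustrative numbers: the exit block runs 100 times; observed flow
        // out of the loop is 4, and capping implies 6 more should have left.
        double exitBlockWeight   = 100.0;
        double currentExitWeight = 4.0;
        double missingExitWeight = 6.0;

        double exitLikelihood     = (missingExitWeight + currentExitWeight) / exitBlockWeight; // 0.10
        double continueLikelihood = 1.0 - exitLikelihood;                                      // 0.90

        // The new exit likelihood must exceed the old one (the assert in the hunk).
        assert(exitLikelihood > currentExitWeight / exitBlockWeight);
        assert(std::fabs(exitLikelihood + continueLikelihood - 1.0) < 1e-12);
        return 0;
    }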
diff --git a/src/coreclr/jit/fgprofilesynthesis.h b/src/coreclr/jit/fgprofilesynthesis.h
index 9297357049e8..304ca58d9da4 100644
--- a/src/coreclr/jit/fgprofilesynthesis.h
+++ b/src/coreclr/jit/fgprofilesynthesis.h
@@ -58,7 +58,6 @@ private:
weight_t SumOutgoingLikelihoods(BasicBlock* block, WeightVector* likelihoods = nullptr);
void AssignLikelihoods();
- void AssignLikelihoodNext(BasicBlock* block);
void AssignLikelihoodJump(BasicBlock* block);
void AssignLikelihoodCond(BasicBlock* block);
void AssignLikelihoodSwitch(BasicBlock* block);
diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp
index c393648c843e..b621739a41cd 100644
--- a/src/coreclr/jit/flowgraph.cpp
+++ b/src/coreclr/jit/flowgraph.cpp
@@ -266,17 +266,11 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block)
// I want to create:
// top -> poll -> bottom (lexically)
// so that we jump over poll to get to bottom.
- BasicBlock* top = block;
- BBKinds oldJumpKind = top->GetKind();
+ BasicBlock* top = block;
BasicBlock* poll = fgNewBBafter(BBJ_ALWAYS, top, true);
bottom = fgNewBBafter(top->GetKind(), poll, true);
- poll->SetTarget(bottom);
- assert(poll->JumpsToNext());
-
- bottom->TransferTarget(top);
-
// Update block flags
const BasicBlockFlags originalFlags = top->GetFlagsRaw() | BBF_GC_SAFE_POINT;
@@ -300,7 +294,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block)
}
// Remove the last statement from Top and add it to Bottom if necessary.
- if ((oldJumpKind == BBJ_COND) || (oldJumpKind == BBJ_RETURN) || (oldJumpKind == BBJ_THROW))
+ if (top->KindIs(BBJ_COND, BBJ_RETURN, BBJ_THROW))
{
Statement* stmt = top->firstStmt();
while (stmt->GetNextStmt() != nullptr)
@@ -364,38 +358,47 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block)
}
#endif
- top->SetCond(bottom, poll);
// Bottom has Top and Poll as its predecessors. Poll has just Top as a predecessor.
- fgAddRefPred(bottom, poll);
- fgAddRefPred(bottom, top);
- fgAddRefPred(poll, top);
+ FlowEdge* const trueEdge = fgAddRefPred(bottom, top);
+ FlowEdge* const falseEdge = fgAddRefPred(poll, top);
+
+ FlowEdge* const newEdge = fgAddRefPred(bottom, poll);
+ poll->SetTargetEdge(newEdge);
+ assert(poll->JumpsToNext());
// Replace Top with Bottom in the predecessor list of all outgoing edges from Bottom
// (1 for unconditional branches, 2 for conditional branches, N for switches).
- switch (oldJumpKind)
+ switch (top->GetKind())
{
case BBJ_RETURN:
case BBJ_THROW:
// no successors
break;
+
case BBJ_COND:
// replace predecessor in true/false successors.
noway_assert(!bottom->IsLast());
- fgReplacePred(bottom->GetFalseTarget(), top, bottom);
- fgReplacePred(bottom->GetTrueTarget(), top, bottom);
+ fgReplacePred(top->GetFalseEdge(), bottom);
+ fgReplacePred(top->GetTrueEdge(), bottom);
break;
case BBJ_ALWAYS:
case BBJ_CALLFINALLY:
- fgReplacePred(bottom->GetTarget(), top, bottom);
+ fgReplacePred(top->GetTargetEdge(), bottom);
break;
+
case BBJ_SWITCH:
NO_WAY("SWITCH should be a call rather than an inlined poll.");
break;
+
default:
NO_WAY("Unknown block type for updating predecessor lists.");
+ break;
}
+ bottom->TransferTarget(top);
+ top->SetCond(trueEdge, falseEdge);
+
if (compCurBB == top)
{
compCurBB = bottom;
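The rewiring above replaces the SetTarget/SetCond-by-block calls with an edge-first protocol: create the flow edge via fgAddRefPred, then install that same edge as the block's successor. A toy model of the pairing, with illustrative types rather than the JIT's:

    #include <cassert>
    #include <vector>

    struct Block;

    struct Edge
    {
        Block* source = nullptr;
        Block* dest   = nullptr;
    };

    struct Block
    {
        std::vector<Edge*> preds;                 // incoming edges
        Edge*              targetEdge = nullptr;  // single successor (BBJ_ALWAYS-style)

        Block* target() const
        {
            return targetEdge->dest; // the target block is derived from the edge
        }
    };

    // Mirrors the fgAddRefPred(dest, source) + SetTargetEdge(edge) pairing:
    // one edge object serves as both the pred-list entry and the successor link.
    Edge* addRefPred(Block* dest, Block* source, Edge* e)
    {
        e->source = source;
        e->dest   = dest;
        dest->preds.push_back(e);
        return e;
    }

    int main()
    {
        Block poll, bottom;
        Edge  e;
        poll.targetEdge = addRefPred(&bottom, &poll, &e);
        assert(poll.target() == &bottom);
        assert(bottom.preds.size() == 1);
        return 0;
    }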
@@ -1625,9 +1628,9 @@ void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block)
assert(ehDsc->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX);
// Convert the BBJ_RETURN to BBJ_ALWAYS, jumping to genReturnBB.
- block->SetKindAndTarget(BBJ_ALWAYS, genReturnBB);
FlowEdge* const newEdge = fgAddRefPred(genReturnBB, block);
newEdge->setLikelihood(1.0);
+ block->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
#ifdef DEBUG
if (verbose)
@@ -2097,9 +2100,9 @@ private:
// Change BBJ_RETURN to BBJ_ALWAYS targeting const return block.
assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0);
- returnBlock->SetKindAndTarget(BBJ_ALWAYS, constReturnBlock);
FlowEdge* const newEdge = comp->fgAddRefPred(constReturnBlock, returnBlock);
newEdge->setLikelihood(1.0);
+ returnBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
// Remove GT_RETURN since constReturnBlock returns the constant.
assert(returnBlock->lastStmt()->GetRootNode()->OperIs(GT_RETURN));
@@ -2758,15 +2761,14 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block)
/* Allocate a new basic block */
- BasicBlock* newHead = BasicBlock::New(this, BBJ_ALWAYS, block);
+ BasicBlock* newHead = BasicBlock::New(this);
newHead->SetFlags(BBF_INTERNAL | BBF_NONE_QUIRK);
newHead->inheritWeight(block);
newHead->bbRefs = 0;
fgInsertBBbefore(block, newHead); // insert the new block in the block list
- assert(newHead->JumpsToNext());
- fgExtendEHRegionBefore(block); // Update the EH table to make the prolog block the first block in the block's EH
- // block.
+ fgExtendEHRegionBefore(block); // Update the EH table to make the prolog block the first block in the block's
+ // EH region.

// Distribute the pred list between newHead and block. Incoming edges coming from outside
// the handler go to the prolog. Edges coming from within the handler are back-edges, and
@@ -2782,11 +2784,13 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block)
switch (predBlock->GetKind())
{
case BBJ_CALLFINALLY:
+ {
noway_assert(predBlock->TargetIs(block));
- predBlock->SetTarget(newHead);
- fgRemoveRefPred(block, predBlock);
- fgAddRefPred(newHead, predBlock);
+ fgRemoveRefPred(predBlock->GetTargetEdge());
+ FlowEdge* const newEdge = fgAddRefPred(newHead, predBlock);
+ predBlock->SetTargetEdge(newEdge);
break;
+ }
default:
// The only way into the handler is via a BBJ_CALLFINALLY (to a finally handler), or
@@ -2797,10 +2801,10 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block)
}
}
- assert(nullptr == fgGetPredForBlock(block, newHead));
- fgAddRefPred(block, newHead);
-
- assert(newHead->HasFlag(BBF_INTERNAL));
+ assert(fgGetPredForBlock(block, newHead) == nullptr);
+ FlowEdge* const newEdge = fgAddRefPred(block, newHead);
+ newHead->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
+ assert(newHead->JumpsToNext());
}
//------------------------------------------------------------------------
@@ -3374,7 +3378,7 @@ PhaseStatus Compiler::fgCreateThrowHelperBlocks()
assert((add->acdKind == SCK_FAIL_FAST) || (bbThrowIndex(srcBlk) == add->acdData));
assert(add->acdKind != SCK_NONE);
- BasicBlock* const newBlk = fgNewBBinRegion(jumpKinds[add->acdKind], srcBlk, /* jumpDest */ nullptr,
+ BasicBlock* const newBlk = fgNewBBinRegion(jumpKinds[add->acdKind], srcBlk,
/* runRarely */ true, /* insertAtEnd */ true);
// Update the descriptor
@@ -3438,7 +3442,7 @@ PhaseStatus Compiler::fgCreateThrowHelperBlocks()
#endif // DEBUG
// Mark the block as added by the compiler and not removable by future flow
- // graph optimizations. Note that no bbTarget points to these blocks.
+ // graph optimizations. Note that no block's target edge points to these blocks.
//
newBlk->SetFlags(BBF_IMPORTED | BBF_DONT_REMOVE);
diff --git a/src/coreclr/jit/gcencode.cpp b/src/coreclr/jit/gcencode.cpp
index 96005e605766..d039cb316937 100644
--- a/src/coreclr/jit/gcencode.cpp
+++ b/src/coreclr/jit/gcencode.cpp
@@ -1564,7 +1564,7 @@ size_t GCInfo::gcInfoBlockHdrSave(
header->syncStartOffset = INVALID_SYNC_OFFSET;
header->syncEndOffset = INVALID_SYNC_OFFSET;
-#ifndef UNIX_X86_ABI
+#if !defined(FEATURE_EH_FUNCLETS)
// The JIT is responsible for synchronization in the funclet-based EH model that x86/Linux uses.
if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
{
@@ -4698,7 +4698,7 @@ void GCInfo::gcMakeVarPtrTable(GcInfoEncoder* gcInfoEncoder, MakeRegPtrMode mode
// unused by alignment
C_ASSERT((OFFSET_MASK + 1) <= sizeof(int));
-#ifdef DEBUG
+#if defined(DEBUG) && defined(JIT32_GCENCODER) && !defined(FEATURE_EH_FUNCLETS)
if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
{
// Tracked variables can't be pinned, and the encoding takes
@@ -4712,7 +4712,7 @@ void GCInfo::gcMakeVarPtrTable(GcInfoEncoder* gcInfoEncoder, MakeRegPtrMode mode
assert((flags & this_OFFSET_FLAG) == 0);
}
}
-#endif // DEBUG
+#endif
// Only need to do this once, and only if we have EH.
if ((mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS) && compiler->ehAnyFunclets())
diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp
index 298fd1074a4f..7b7e3c60031f 100644
--- a/src/coreclr/jit/gentree.cpp
+++ b/src/coreclr/jit/gentree.cpp
@@ -252,7 +252,6 @@ void GenTree::InitNodeSize()
GenTree::s_gtNodeSizes[GT_FIELD_ADDR] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_CMPXCHG] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_QMARK] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_STORE_DYN_BLK] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_INTRINSIC] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ALLOCOBJ] = TREE_NODE_SZ_LARGE;
#if USE_HELPERS_FOR_INT_DIV
@@ -318,7 +317,6 @@ void GenTree::InitNodeSize()
static_assert_no_msg(sizeof(GenTreeStoreInd) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeAddrMode) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeBlk) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeStoreDynBlk) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeRetExpr) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeILOffset) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreePhiArg) <= TREE_NODE_SZ_SMALL);
@@ -1845,6 +1843,13 @@ regNumber CallArgs::GetCustomRegister(Compiler* comp, CorInfoCallConvExtension c
case WellKnownArg::DispatchIndirectCallTarget:
return REG_DISPATCH_INDIRECT_CALL_ADDR;
#endif
+
+#ifdef SWIFT_SUPPORT
+ case WellKnownArg::SwiftSelf:
+ assert(cc == CorInfoCallConvExtension::Swift);
+ return REG_SWIFT_SELF;
+#endif // SWIFT_SUPPORT
+
default:
break;
}
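SwiftSelf above joins the small set of well-known arguments pinned to a dedicated register. The shape of that lookup, with invented enum values standing in for the real registers:

    #include <cassert>

    enum class WellKnownArg { None, SwiftSelf };
    enum class Reg { None, R_SWIFT_SELF }; // stand-in names, not actual JIT registers

    // Same shape as CallArgs::GetCustomRegister: a few well-known args map
    // to fixed registers; anything else falls through to the default path.
    Reg customRegister(WellKnownArg arg)
    {
        switch (arg)
        {
            case WellKnownArg::SwiftSelf:
                return Reg::R_SWIFT_SELF;
            default:
                return Reg::None;
        }
    }

    int main()
    {
        assert(customRegister(WellKnownArg::SwiftSelf) == Reg::R_SWIFT_SELF);
        assert(customRegister(WellKnownArg::None) == Reg::None);
        return 0;
    }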
@@ -3151,11 +3156,6 @@ AGAIN:
Compare(op1->AsCmpXchg()->Data(), op2->AsCmpXchg()->Data()) &&
Compare(op1->AsCmpXchg()->Comparand(), op2->AsCmpXchg()->Comparand());
- case GT_STORE_DYN_BLK:
- return Compare(op1->AsStoreDynBlk()->Addr(), op2->AsStoreDynBlk()->Addr()) &&
- Compare(op1->AsStoreDynBlk()->Data(), op2->AsStoreDynBlk()->Data()) &&
- Compare(op1->AsStoreDynBlk()->gtDynamicSize, op2->AsStoreDynBlk()->gtDynamicSize);
-
default:
assert(!"unexpected operator");
}
@@ -3704,12 +3704,6 @@ AGAIN:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCmpXchg()->Comparand()));
break;
- case GT_STORE_DYN_BLK:
- hash = genTreeHashAdd(hash, gtHashValue(tree->AsStoreDynBlk()->Data()));
- hash = genTreeHashAdd(hash, gtHashValue(tree->AsStoreDynBlk()->Addr()));
- hash = genTreeHashAdd(hash, gtHashValue(tree->AsStoreDynBlk()->gtDynamicSize));
- break;
-
default:
#ifdef DEBUG
gtDispTree(tree);
@@ -4520,12 +4514,6 @@ bool Compiler::gtGetIndNodeCost(GenTreeIndir* node, int* pCostEx, int* pCostSz)
{
// See if we can form a complex addressing mode.
bool doAddrMode = true;
-
- // TODO-1stClassStructs: delete once IND<struct> nodes are no more.
- if (node->TypeGet() == TYP_STRUCT)
- {
- doAddrMode = false;
- }
#ifdef TARGET_ARM64
if (node->IsVolatile())
{
@@ -6384,22 +6372,6 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree)
}
break;
- case GT_STORE_DYN_BLK:
- level = gtSetEvalOrder(tree->AsStoreDynBlk()->Addr());
- costEx = tree->AsStoreDynBlk()->Addr()->GetCostEx();
- costSz = tree->AsStoreDynBlk()->Addr()->GetCostSz();
-
- lvl2 = gtSetEvalOrder(tree->AsStoreDynBlk()->Data());
- level = max(level, lvl2);
- costEx += tree->AsStoreDynBlk()->Data()->GetCostEx();
- costSz += tree->AsStoreDynBlk()->Data()->GetCostSz();
-
- lvl2 = gtSetEvalOrder(tree->AsStoreDynBlk()->gtDynamicSize);
- level = max(level, lvl2);
- costEx += tree->AsStoreDynBlk()->gtDynamicSize->GetCostEx();
- costSz += tree->AsStoreDynBlk()->gtDynamicSize->GetCostSz();
- break;
-
case GT_SELECT:
level = gtSetEvalOrder(tree->AsConditional()->gtCond);
costEx = tree->AsConditional()->gtCond->GetCostEx();
@@ -6713,6 +6685,7 @@ bool GenTree::TryGetUse(GenTree* operand, GenTree*** pUse)
case GT_PINVOKE_EPILOG:
case GT_IL_OFFSET:
case GT_NOP:
+ case GT_SWIFT_ERROR:
return false;
// Standard unary operators
@@ -6846,27 +6819,6 @@ bool GenTree::TryGetUse(GenTree* operand, GenTree*** pUse)
return false;
}
- case GT_STORE_DYN_BLK:
- {
- GenTreeStoreDynBlk* const dynBlock = this->AsStoreDynBlk();
- if (operand == dynBlock->gtOp1)
- {
- *pUse = &dynBlock->gtOp1;
- return true;
- }
- if (operand == dynBlock->gtOp2)
- {
- *pUse = &dynBlock->gtOp2;
- return true;
- }
- if (operand == dynBlock->gtDynamicSize)
- {
- *pUse = &dynBlock->gtDynamicSize;
- return true;
- }
- return false;
- }
-
case GT_CALL:
{
GenTreeCall* const call = this->AsCall();
@@ -7026,7 +6978,6 @@ bool GenTree::OperRequiresAsgFlag() const
case GT_STORE_LCL_FLD:
case GT_STOREIND:
case GT_STORE_BLK:
- case GT_STORE_DYN_BLK:
case GT_XADD:
case GT_XORR:
case GT_XAND:
@@ -7066,6 +7017,9 @@ bool GenTree::OperRequiresCallFlag(Compiler* comp) const
case GT_KEEPALIVE:
return true;
+ case GT_SWIFT_ERROR:
+ return true;
+
case GT_INTRINSIC:
return comp->IsIntrinsicImplementedByUserCall(this->AsIntrinsic()->gtIntrinsicName);
@@ -7136,7 +7090,6 @@ bool GenTree::OperIsImplicitIndir() const
case GT_CMPXCHG:
case GT_BLK:
case GT_STORE_BLK:
- case GT_STORE_DYN_BLK:
case GT_BOX:
case GT_ARR_ELEM:
case GT_ARR_LENGTH:
@@ -7233,7 +7186,6 @@ ExceptionSetFlags GenTree::OperExceptions(Compiler* comp)
case GT_BLK:
case GT_NULLCHECK:
case GT_STORE_BLK:
- case GT_STORE_DYN_BLK:
case GT_ARR_LENGTH:
case GT_MDARR_LENGTH:
case GT_MDARR_LOWER_BOUND:
@@ -7353,7 +7305,6 @@ bool GenTree::OperRequiresGlobRefFlag(Compiler* comp) const
case GT_STOREIND:
case GT_STORE_BLK:
- case GT_STORE_DYN_BLK:
case GT_XADD:
case GT_XORR:
case GT_XAND:
@@ -7362,6 +7313,7 @@ bool GenTree::OperRequiresGlobRefFlag(Compiler* comp) const
case GT_CMPXCHG:
case GT_MEMORYBARRIER:
case GT_KEEPALIVE:
+ case GT_SWIFT_ERROR:
return true;
case GT_CALL:
@@ -7411,7 +7363,6 @@ bool GenTree::OperSupportsOrderingSideEffect() const
case GT_STOREIND:
case GT_NULLCHECK:
case GT_STORE_BLK:
- case GT_STORE_DYN_BLK:
case GT_XADD:
case GT_XORR:
case GT_XAND:
@@ -7420,6 +7371,7 @@ bool GenTree::OperSupportsOrderingSideEffect() const
case GT_CMPXCHG:
case GT_MEMORYBARRIER:
case GT_CATCH_ARG:
+ case GT_SWIFT_ERROR:
return true;
default:
return false;
@@ -8746,39 +8698,11 @@ GenTreeBlk* Compiler::gtNewStoreBlkNode(ClassLayout* layout, GenTree* addr, GenT
}
//------------------------------------------------------------------------------
-// gtNewStoreDynBlkNode : Create a dynamic block store node.
-//
-// Arguments:
-// addr - Destination address
-// data - Value to store (init val or indirection representing a location)
-// dynamicSize - Node that computes number of bytes to store
-// indirFlags - Indirection flags
-//
-// Return Value:
-// The created GT_STORE_DYN_BLK node.
-//
-GenTreeStoreDynBlk* Compiler::gtNewStoreDynBlkNode(GenTree* addr,
- GenTree* data,
- GenTree* dynamicSize,
- GenTreeFlags indirFlags)
-{
- assert((indirFlags & GTF_IND_INVARIANT) == 0);
- assert(data->IsInitVal() || data->OperIs(GT_IND));
-
- GenTreeStoreDynBlk* store = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(addr, data, dynamicSize);
- store->gtFlags |= GTF_ASG;
- gtInitializeIndirNode(store, indirFlags);
- gtInitializeStoreNode(store, data);
-
- return store;
-}
-
-//------------------------------------------------------------------------------
// gtNewStoreIndNode : Create an indirect store node.
//
// Arguments:
// type - Type of the store
-// addr - Destionation address
+// addr - Destination address
// data - Value to store
// indirFlags - Indirection flags
//
@@ -9810,12 +9734,6 @@ GenTree* Compiler::gtCloneExpr(GenTree* tree)
gtCloneExpr(tree->AsCmpXchg()->Data()), gtCloneExpr(tree->AsCmpXchg()->Comparand()));
break;
- case GT_STORE_DYN_BLK:
- copy = new (this, oper) GenTreeStoreDynBlk(gtCloneExpr(tree->AsStoreDynBlk()->Addr()),
- gtCloneExpr(tree->AsStoreDynBlk()->Data()),
- gtCloneExpr(tree->AsStoreDynBlk()->gtDynamicSize));
- break;
-
case GT_SELECT:
copy =
new (this, oper) GenTreeConditional(oper, tree->TypeGet(), gtCloneExpr(tree->AsConditional()->gtCond),
@@ -10324,6 +10242,7 @@ GenTreeUseEdgeIterator::GenTreeUseEdgeIterator(GenTree* node)
case GT_PINVOKE_EPILOG:
case GT_IL_OFFSET:
case GT_NOP:
+ case GT_SWIFT_ERROR:
m_state = -1;
return;
@@ -10427,12 +10346,6 @@ GenTreeUseEdgeIterator::GenTreeUseEdgeIterator(GenTree* node)
m_advance = &GenTreeUseEdgeIterator::AdvanceArrElem;
return;
- case GT_STORE_DYN_BLK:
- m_edge = &m_node->AsStoreDynBlk()->Addr();
- assert(*m_edge != nullptr);
- m_advance = &GenTreeUseEdgeIterator::AdvanceStoreDynBlk;
- return;
-
case GT_CALL:
m_statePtr = m_node->AsCall()->gtArgs.Args().begin().GetArg();
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_ARGS>;
@@ -10499,29 +10412,6 @@ void GenTreeUseEdgeIterator::AdvanceArrElem()
}
//------------------------------------------------------------------------
-// GenTreeUseEdgeIterator::AdvanceStoreDynBlk: produces the next operand of a StoreDynBlk node and advances the state.
-//
-void GenTreeUseEdgeIterator::AdvanceStoreDynBlk()
-{
- GenTreeStoreDynBlk* const dynBlock = m_node->AsStoreDynBlk();
- switch (m_state)
- {
- case 0:
- m_edge = &dynBlock->Data();
- m_state = 1;
- break;
- case 1:
- m_edge = &dynBlock->gtDynamicSize;
- m_advance = &GenTreeUseEdgeIterator::Terminate;
- break;
- default:
- unreached();
- }
-
- assert(*m_edge != nullptr);
-}
-
-//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceFieldList: produces the next operand of a FieldList node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceFieldList()
@@ -10876,7 +10766,7 @@ bool GenTree::Precedes(GenTree* other)
//
void GenTree::SetIndirExceptionFlags(Compiler* comp)
{
- assert(OperIsIndirOrArrMetaData() && (OperIsSimple() || OperIs(GT_CMPXCHG, GT_STORE_DYN_BLK)));
+ assert(OperIsIndirOrArrMetaData() && (OperIsSimple() || OperIs(GT_CMPXCHG)));
if (IndirMayFault(comp))
{
@@ -10898,11 +10788,6 @@ void GenTree::SetIndirExceptionFlags(Compiler* comp)
gtFlags |= AsCmpXchg()->Data()->gtFlags & GTF_EXCEPT;
gtFlags |= AsCmpXchg()->Comparand()->gtFlags & GTF_EXCEPT;
}
- else if (OperIs(GT_STORE_DYN_BLK))
- {
- gtFlags |= AsStoreDynBlk()->Data()->gtFlags & GTF_EXCEPT;
- gtFlags |= AsStoreDynBlk()->gtDynamicSize->gtFlags & GTF_EXCEPT;
- }
}
#ifdef DEBUG
@@ -11325,7 +11210,6 @@ void Compiler::gtDispNode(GenTree* tree, IndentStack* indentStack, _In_ _In_opt_
case GT_IND:
case GT_STOREIND:
case GT_STORE_BLK:
- case GT_STORE_DYN_BLK:
// We prefer printing V or U
if ((tree->gtFlags & (GTF_IND_VOLATILE | GTF_IND_UNALIGNED)) == 0)
{
@@ -12451,6 +12335,7 @@ void Compiler::gtDispLeaf(GenTree* tree, IndentStack* indentStack)
case GT_MEMORYBARRIER:
case GT_PINVOKE_PROLOG:
case GT_JMPTABLE:
+ case GT_SWIFT_ERROR:
break;
case GT_RET_EXPR:
@@ -13152,28 +13037,6 @@ void Compiler::gtDispTree(GenTree* tree,
}
break;
- case GT_STORE_DYN_BLK:
- if (tree->OperIsCopyBlkOp())
- {
- printf(" (copy)");
- }
- else if (tree->OperIsInitBlkOp())
- {
- printf(" (init)");
- }
- gtDispCommonEndLine(tree);
-
- if (!topOnly)
- {
- gtDispChild(tree->AsStoreDynBlk()->Addr(), indentStack, IIArc, nullptr, topOnly);
- if (tree->AsStoreDynBlk()->Data() != nullptr)
- {
- gtDispChild(tree->AsStoreDynBlk()->Data(), indentStack, IIArc, nullptr, topOnly);
- }
- gtDispChild(tree->AsStoreDynBlk()->gtDynamicSize, indentStack, IIArcBottom, nullptr, topOnly);
- }
- break;
-
case GT_SELECT:
gtDispCommonEndLine(tree);
@@ -13236,6 +13099,8 @@ const char* Compiler::gtGetWellKnownArgNameForArgMsg(WellKnownArg arg)
case WellKnownArg::ValidateIndirectCallTarget:
case WellKnownArg::DispatchIndirectCallTarget:
return "cfg tgt";
+ case WellKnownArg::SwiftSelf:
+ return "swift self";
default:
return nullptr;
}
@@ -13605,22 +13470,6 @@ void Compiler::gtDispLIRNode(GenTree* node, const char* prefixMsg /* = nullptr *
displayOperand(operand, buf, operandArc, indentStack, prefixIndent);
}
}
- else if (node->OperIs(GT_STORE_DYN_BLK))
- {
- if (operand == node->AsBlk()->Addr())
- {
- displayOperand(operand, "lhs", operandArc, indentStack, prefixIndent);
- }
- else if (operand == node->AsBlk()->Data())
- {
- displayOperand(operand, "rhs", operandArc, indentStack, prefixIndent);
- }
- else
- {
- assert(operand == node->AsStoreDynBlk()->gtDynamicSize);
- displayOperand(operand, "size", operandArc, indentStack, prefixIndent);
- }
- }
else
{
displayOperand(operand, "", operandArc, indentStack, prefixIndent);
@@ -16977,22 +16826,6 @@ bool Compiler::gtSplitTree(
}
private:
- bool IsLocation(const UseInfo& useInf)
- {
- if (useInf.User == nullptr)
- {
- return false;
- }
-
- if (useInf.User->OperIs(GT_STORE_DYN_BLK) && !(*useInf.Use)->OperIs(GT_CNS_INT, GT_INIT_VAL) &&
- (useInf.Use == &useInf.User->AsStoreDynBlk()->Data()))
- {
- return true;
- }
-
- return false;
- }
-
bool IsReturned(const UseInfo& useInf, bool userIsReturned)
{
if (useInf.User != nullptr)
@@ -17086,18 +16919,6 @@ bool Compiler::gtSplitTree(
return;
}
- if (IsLocation(useInf))
- {
- // Only a handful of nodes can be location, and they are all unary or nullary.
- assert((*use)->OperIs(GT_IND, GT_BLK, GT_LCL_VAR, GT_LCL_FLD));
- if ((*use)->OperIsUnary())
- {
- SplitOutUse(UseInfo{&(*use)->AsUnOp()->gtOp1, user}, false);
- }
-
- return;
- }
-
#ifndef TARGET_64BIT
// GT_MUL with GTF_MUL_64RSLT is required to stay with casts on the
// operands. Note that one operand may also be a constant, but we
diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h
index 26ab5b2c705a..328860eb1713 100644
--- a/src/coreclr/jit/gentree.h
+++ b/src/coreclr/jit/gentree.h
@@ -194,13 +194,13 @@ inline AssertionIndex GetAssertionIndex(unsigned index)
class AssertionInfo
{
- // true if the assertion holds on the bbNext edge instead of the bbTarget edge (for GT_JTRUE nodes)
- unsigned short m_isNextEdgeAssertion : 1;
+ // true if the assertion holds on the false edge instead of the true edge (for GT_JTRUE nodes)
+ unsigned short m_assertionHoldsOnFalseEdge : 1;
// 1-based index of the assertion
unsigned short m_assertionIndex : 15;
- AssertionInfo(bool isNextEdgeAssertion, AssertionIndex assertionIndex)
- : m_isNextEdgeAssertion(isNextEdgeAssertion), m_assertionIndex(assertionIndex)
+ AssertionInfo(bool assertionHoldsOnFalseEdge, AssertionIndex assertionIndex)
+ : m_assertionHoldsOnFalseEdge(assertionHoldsOnFalseEdge), m_assertionIndex(assertionIndex)
{
assert(m_assertionIndex == assertionIndex);
}
@@ -223,8 +223,8 @@ public:
void Clear()
{
- m_isNextEdgeAssertion = 0;
- m_assertionIndex = NO_ASSERTION_INDEX;
+ m_assertionHoldsOnFalseEdge = 0;
+ m_assertionIndex = NO_ASSERTION_INDEX;
}
bool HasAssertion() const
@@ -237,9 +237,9 @@ public:
return m_assertionIndex;
}
- bool IsNextEdgeAssertion() const
+ bool AssertionHoldsOnFalseEdge() const
{
- return m_isNextEdgeAssertion;
+ return m_assertionHoldsOnFalseEdge;
}
};
@@ -1216,7 +1216,7 @@ public:
static bool OperIsStoreBlk(genTreeOps gtOper)
{
- return StaticOperIs(gtOper, GT_STORE_BLK, GT_STORE_DYN_BLK);
+ return StaticOperIs(gtOper, GT_STORE_BLK);
}
bool OperIsStoreBlk() const
@@ -1545,7 +1545,7 @@ public:
static bool OperIsIndir(genTreeOps gtOper)
{
static_assert_no_msg(AreContiguous(GT_LOCKADD, GT_XAND, GT_XORR, GT_XADD, GT_XCHG, GT_CMPXCHG, GT_IND,
- GT_STOREIND, GT_BLK, GT_STORE_BLK, GT_STORE_DYN_BLK, GT_NULLCHECK));
+ GT_STOREIND, GT_BLK, GT_STORE_BLK, GT_NULLCHECK));
return (GT_LOCKADD <= gtOper) && (gtOper <= GT_NULLCHECK);
}
@@ -2862,7 +2862,6 @@ class GenTreeUseEdgeIterator final
// Advance functions for special nodes
void AdvanceCmpXchg();
void AdvanceArrElem();
- void AdvanceStoreDynBlk();
void AdvanceFieldList();
void AdvancePhi();
void AdvanceConditional();
@@ -4113,6 +4112,10 @@ enum GenTreeCallFlags : unsigned int
GTF_CALL_M_CAST_CAN_BE_EXPANDED = 0x04000000, // this cast (helper call) can be expanded if it's profitable. To be removed.
GTF_CALL_M_CAST_OBJ_NONNULL = 0x08000000, // if we expand this specific cast we don't need to check the input object for null
// NOTE: if needed, this flag can be removed, and we can introduce new _NONNUL cast helpers
+
+#ifdef SWIFT_SUPPORT
+ GTF_CALL_M_SWIFT_ERROR_HANDLING = 0x10000000, // call uses the Swift calling convention, and the error register will be checked after it returns.
+#endif // SWIFT_SUPPORT
};
inline constexpr GenTreeCallFlags operator ~(GenTreeCallFlags a)
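GTF_CALL_M_SWIFT_ERROR_HANDLING takes the next free bit after GTF_CALL_M_CAST_OBJ_NONNULL. A compile-time sanity check of the single-bit, non-overlapping property, with the values copied from the hunk above:

    #include <cstdint>

    constexpr std::uint32_t CAST_OBJ_NONNULL     = 0x08000000;
    constexpr std::uint32_t SWIFT_ERROR_HANDLING = 0x10000000;

    constexpr bool isSingleBit(std::uint32_t f)
    {
        return (f != 0) && ((f & (f - 1)) == 0); // power of two
    }

    static_assert(isSingleBit(SWIFT_ERROR_HANDLING), "flag must be one bit");
    static_assert((SWIFT_ERROR_HANDLING & CAST_OBJ_NONNULL) == 0, "flags must not overlap");

    int main()
    {
        return 0;
    }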
@@ -4398,7 +4401,7 @@ enum class CFGCallKind
class CallArgs;
-enum class WellKnownArg
+enum class WellKnownArg : unsigned
{
None,
ThisPointer,
@@ -4416,6 +4419,7 @@ enum class WellKnownArg
R2RIndirectionCell,
ValidateIndirectCallTarget,
DispatchIndirectCallTarget,
+ SwiftSelf,
};
#ifdef DEBUG
@@ -4725,6 +4729,7 @@ public:
CORINFO_CLASS_HANDLE GetSignatureClassHandle() { return m_signatureClsHnd; }
var_types GetSignatureType() { return m_signatureType; }
WellKnownArg GetWellKnownArg() { return m_wellKnownArg; }
+ void SetWellKnownArg(const WellKnownArg argType) { m_wellKnownArg = argType; }
bool IsTemp() { return m_isTmp; }
// clang-format on
@@ -7354,15 +7359,10 @@ private:
public:
ClassLayout* GetLayout() const
{
+ assert(m_layout != nullptr);
return m_layout;
}
- void SetLayout(ClassLayout* layout)
- {
- assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
- m_layout = layout;
- }
-
// The data to be stored (null for GT_BLK)
GenTree*& Data()
{
@@ -7376,8 +7376,7 @@ public:
// The size of the buffer to be copied.
unsigned Size() const
{
- assert((m_layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
- return (m_layout != nullptr) ? m_layout->GetSize() : 0;
+ return m_layout->GetSize();
}
// Instruction selection: during codegen time, what code sequence we will be using
@@ -7403,7 +7402,7 @@ public:
bool ContainsReferences()
{
- return (m_layout != nullptr) && m_layout->HasGCPtr();
+ return m_layout->HasGCPtr();
}
bool IsOnHeapAndContainsReferences()
@@ -7434,8 +7433,8 @@ public:
void Initialize(ClassLayout* layout)
{
- assert(OperIsBlk(OperGet()) && ((layout != nullptr) || OperIs(GT_STORE_DYN_BLK)));
- assert((layout == nullptr) || (layout->GetSize() != 0));
+ assert(layout != nullptr);
+ assert(layout->GetSize() != 0);
m_layout = layout;
gtBlkOpKind = BlkOpKindInvalid;
@@ -7453,35 +7452,6 @@ protected:
#endif // DEBUGGABLE_GENTREE
};
-// GenTreeStoreDynBlk -- 'dynamic block store' (GT_STORE_DYN_BLK).
-//
-// This node is used to represent stores that have a dynamic size - the "cpblk" and "initblk"
-// IL instructions are implemented with it. Note that such stores assume the input has no GC
-// pointers in it, and as such do not ever use write barriers.
-//
-// The "Data()" member of this node will either be a "dummy" IND(struct) node, for "cpblk", or
-// the zero constant/INIT_VAL for "initblk".
-//
-struct GenTreeStoreDynBlk : public GenTreeBlk
-{
-public:
- GenTree* gtDynamicSize;
-
- GenTreeStoreDynBlk(GenTree* dstAddr, GenTree* data, GenTree* dynamicSize)
- : GenTreeBlk(GT_STORE_DYN_BLK, TYP_VOID, dstAddr, data, nullptr), gtDynamicSize(dynamicSize)
- {
- gtFlags |= dynamicSize->gtFlags & GTF_ALL_EFFECT;
- }
-
-#if DEBUGGABLE_GENTREE
-protected:
- friend GenTree;
- GenTreeStoreDynBlk() : GenTreeBlk()
- {
- }
-#endif // DEBUGGABLE_GENTREE
-};
-
// Read-modify-write status of a RMW memory op rooted at a storeInd
enum RMWStatus
{
@@ -8896,10 +8866,6 @@ struct GenTreeCCMP final : public GenTreeOpCC
inline bool GenTree::OperIsBlkOp()
{
- if (OperIs(GT_STORE_DYN_BLK))
- {
- return true;
- }
if (OperIsStore())
{
return varTypeIsStruct(this);
@@ -9307,7 +9273,7 @@ inline GenTree* GenTree::gtGetOp2IfPresent() const
inline GenTree*& GenTree::Data()
{
- assert(OperIsStore() || OperIs(GT_STORE_DYN_BLK));
+ assert(OperIsStore());
return OperIsLocalStore() ? AsLclVarCommon()->Data() : AsIndir()->Data();
}
diff --git a/src/coreclr/jit/gtlist.h b/src/coreclr/jit/gtlist.h
index 00696e6398fc..26513f0f3441 100644
--- a/src/coreclr/jit/gtlist.h
+++ b/src/coreclr/jit/gtlist.h
@@ -37,6 +37,7 @@ GTNODE(LABEL , GenTree ,0,0,GTK_LEAF) // Jump-
GTNODE(JMP , GenTreeVal ,0,0,GTK_LEAF|GTK_NOVALUE) // Jump to another function
GTNODE(FTN_ADDR , GenTreeFptrVal ,0,0,GTK_LEAF) // Address of a function
GTNODE(RET_EXPR , GenTreeRetExpr ,0,0,GTK_LEAF|DBK_NOTLIR) // Place holder for the return expression from an inline candidate
+GTNODE(SWIFT_ERROR , GenTree ,0,0,GTK_LEAF) // Error register value post-Swift call
//-----------------------------------------------------------------------------
// Constant nodes:
@@ -82,7 +83,6 @@ GTNODE(IND , GenTreeIndir ,0,1,GTK_UNOP)
GTNODE(STOREIND , GenTreeStoreInd ,0,1,GTK_BINOP|GTK_EXOP|GTK_NOVALUE|GTK_STORE) // Store indirection
GTNODE(BLK , GenTreeBlk ,0,1,GTK_UNOP|GTK_EXOP) // Struct load
GTNODE(STORE_BLK , GenTreeBlk ,0,1,GTK_BINOP|GTK_EXOP|GTK_NOVALUE|GTK_STORE) // Struct store
-GTNODE(STORE_DYN_BLK , GenTreeStoreDynBlk ,0,1,GTK_SPECIAL|GTK_NOVALUE) // Dynamically sized block store, with native uint size
GTNODE(NULLCHECK , GenTreeIndir ,0,1,GTK_UNOP|GTK_NOVALUE) // Null checks the source
GTNODE(ARR_LENGTH , GenTreeArrLen ,0,0,GTK_UNOP|GTK_EXOP) // single-dimension (SZ) array length
diff --git a/src/coreclr/jit/gtstructs.h b/src/coreclr/jit/gtstructs.h
index 989b6e554eae..e6823478a3c9 100644
--- a/src/coreclr/jit/gtstructs.h
+++ b/src/coreclr/jit/gtstructs.h
@@ -88,9 +88,8 @@ GTSTRUCT_1(AddrMode , GT_LEA)
GTSTRUCT_1(Qmark , GT_QMARK)
GTSTRUCT_1(PhiArg , GT_PHI_ARG)
GTSTRUCT_1(Phi , GT_PHI)
-GTSTRUCT_N(Indir , GT_IND, GT_NULLCHECK, GT_BLK, GT_STORE_BLK, GT_STORE_DYN_BLK, GT_LOCKADD, GT_XAND, GT_XORR, GT_XADD, GT_XCHG, GT_CMPXCHG, GT_STOREIND)
-GTSTRUCT_N(Blk , GT_BLK, GT_STORE_BLK, GT_STORE_DYN_BLK)
-GTSTRUCT_1(StoreDynBlk , GT_STORE_DYN_BLK)
+GTSTRUCT_N(Indir , GT_IND, GT_NULLCHECK, GT_BLK, GT_STORE_BLK, GT_LOCKADD, GT_XAND, GT_XORR, GT_XADD, GT_XCHG, GT_CMPXCHG, GT_STOREIND)
+GTSTRUCT_N(Blk , GT_BLK, GT_STORE_BLK)
GTSTRUCT_1(StoreInd , GT_STOREIND)
GTSTRUCT_1(CmpXchg , GT_CMPXCHG)
#ifdef TARGET_ARM64
diff --git a/src/coreclr/jit/helperexpansion.cpp b/src/coreclr/jit/helperexpansion.cpp
index 39f7b8f09d27..fa277b07db21 100644
--- a/src/coreclr/jit/helperexpansion.cpp
+++ b/src/coreclr/jit/helperexpansion.cpp
@@ -319,21 +319,11 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm
// Fallback basic block
GenTree* fallbackValueDef = gtNewStoreLclVarNode(rtLookupLcl->GetLclNum(), call);
- BasicBlock* fallbackBb =
- fgNewBBFromTreeAfter(BBJ_ALWAYS, nullcheckBb, fallbackValueDef, debugInfo, nullcheckBb->Next(), true);
-
- assert(fallbackBb->JumpsToNext());
- fallbackBb->SetFlags(BBF_NONE_QUIRK);
-
- // Set nullcheckBb's true jump target
- nullcheckBb->SetTrueTarget(fallbackBb);
+ BasicBlock* fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, nullcheckBb, fallbackValueDef, debugInfo, true);
// Fast-path basic block
GenTree* fastpathValueDef = gtNewStoreLclVarNode(rtLookupLcl->GetLclNum(), fastPathValueClone);
- BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, nullcheckBb, fastpathValueDef, debugInfo, block);
-
- // Set nullcheckBb's false jump target
- nullcheckBb->SetFalseTarget(fastPathBb);
+ BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, nullcheckBb, fastpathValueDef, debugInfo);
BasicBlock* sizeCheckBb = nullptr;
if (needsSizeCheck)
@@ -375,42 +365,59 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm
GenTree* jtrue = gtNewOperNode(GT_JTRUE, TYP_VOID, sizeCheck);
// sizeCheckBb fails - jump to fallbackBb
- sizeCheckBb = fgNewBBFromTreeAfter(BBJ_COND, prevBb, jtrue, debugInfo, fallbackBb);
- sizeCheckBb->SetFalseTarget(nullcheckBb);
+ sizeCheckBb = fgNewBBFromTreeAfter(BBJ_COND, prevBb, jtrue, debugInfo);
}
//
// Update preds in all new blocks
//
- fgRemoveRefPred(block, prevBb);
- fgAddRefPred(block, fastPathBb);
- fgAddRefPred(block, fallbackBb);
assert(prevBb->KindIs(BBJ_ALWAYS));
+ fgRemoveRefPred(prevBb->GetTargetEdge());
+
+ {
+ FlowEdge* const newEdge = fgAddRefPred(block, fastPathBb);
+ fastPathBb->SetTargetEdge(newEdge);
+ }
+
+ {
+ FlowEdge* const newEdge = fgAddRefPred(block, fallbackBb);
+ fallbackBb->SetTargetEdge(newEdge);
+ assert(fallbackBb->JumpsToNext());
+ fallbackBb->SetFlags(BBF_NONE_QUIRK);
+ }
if (needsSizeCheck)
{
// sizeCheckBb is the first block after prevBb
- prevBb->SetTarget(sizeCheckBb);
- fgAddRefPred(sizeCheckBb, prevBb);
+ FlowEdge* const newEdge = fgAddRefPred(sizeCheckBb, prevBb);
+ prevBb->SetTargetEdge(newEdge);
+
// sizeCheckBb flows into nullcheckBb if the size check passes
- fgAddRefPred(nullcheckBb, sizeCheckBb);
+ {
+ FlowEdge* const trueEdge = fgAddRefPred(fallbackBb, sizeCheckBb);
+ FlowEdge* const falseEdge = fgAddRefPred(nullcheckBb, sizeCheckBb);
+ sizeCheckBb->SetTrueEdge(trueEdge);
+ sizeCheckBb->SetFalseEdge(falseEdge);
+ }
+
// fallbackBb is reachable from both nullcheckBb and sizeCheckBb
- fgAddRefPred(fallbackBb, nullcheckBb);
- fgAddRefPred(fallbackBb, sizeCheckBb);
// fastPathBb is only reachable from successful nullcheckBb
- fgAddRefPred(fastPathBb, nullcheckBb);
}
else
{
// nullcheckBb is the first block after prevBb
- prevBb->SetTarget(nullcheckBb);
- fgAddRefPred(nullcheckBb, prevBb);
+ FlowEdge* const newEdge = fgAddRefPred(nullcheckBb, prevBb);
+ prevBb->SetTargetEdge(newEdge);
+
// No size check, nullcheckBb jumps to fast path
- fgAddRefPred(fastPathBb, nullcheckBb);
// fallbackBb is only reachable from nullcheckBb (jump destination)
- fgAddRefPred(fallbackBb, nullcheckBb);
}
+ FlowEdge* const trueEdge = fgAddRefPred(fallbackBb, nullcheckBb);
+ FlowEdge* const falseEdge = fgAddRefPred(fastPathBb, nullcheckBb);
+ nullcheckBb->SetTrueEdge(trueEdge);
+ nullcheckBb->SetFalseEdge(falseEdge);
+
//
// Re-distribute weights (see '[weight: X]' on the diagrams above)
// TODO: consider marking fallbackBb as rarely-taken
@@ -699,11 +706,10 @@ bool Compiler::fgExpandThreadLocalAccessForCallNativeAOT(BasicBlock** pBlock, St
// fallbackBb
GenTree* fallbackValueDef = gtNewStoreLclVarNode(finalLclNum, slowHelper);
- BasicBlock* fallbackBb =
- fgNewBBFromTreeAfter(BBJ_ALWAYS, tlsRootNullCondBB, fallbackValueDef, debugInfo, block, true);
+ BasicBlock* fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, tlsRootNullCondBB, fallbackValueDef, debugInfo, true);
GenTree* fastPathValueDef = gtNewStoreLclVarNode(finalLclNum, gtCloneExpr(finalLcl));
- BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, fastPathValueDef, debugInfo, block, true);
+ BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, fastPathValueDef, debugInfo, true);
*callUse = finalLcl;
@@ -713,14 +719,20 @@ bool Compiler::fgExpandThreadLocalAccessForCallNativeAOT(BasicBlock** pBlock, St
//
// Update preds in all new blocks
//
- fgAddRefPred(fallbackBb, tlsRootNullCondBB);
- fgAddRefPred(fastPathBb, tlsRootNullCondBB);
+ FlowEdge* const trueEdge = fgAddRefPred(fastPathBb, tlsRootNullCondBB);
+ FlowEdge* const falseEdge = fgAddRefPred(fallbackBb, tlsRootNullCondBB);
+ tlsRootNullCondBB->SetTrueEdge(trueEdge);
+ tlsRootNullCondBB->SetFalseEdge(falseEdge);
- fgAddRefPred(block, fallbackBb);
- fgAddRefPred(block, fastPathBb);
+ {
+ FlowEdge* const newEdge = fgAddRefPred(block, fallbackBb);
+ fallbackBb->SetTargetEdge(newEdge);
+ }
- tlsRootNullCondBB->SetTrueTarget(fastPathBb);
- tlsRootNullCondBB->SetFalseTarget(fallbackBb);
+ {
+ FlowEdge* const newEdge = fgAddRefPred(block, fastPathBb);
+ fastPathBb->SetTargetEdge(newEdge);
+ }
// Inherit the weights
block->inheritWeight(prevBb);
@@ -730,9 +742,9 @@ bool Compiler::fgExpandThreadLocalAccessForCallNativeAOT(BasicBlock** pBlock, St
// the fallback will only execute the first time
fallbackBb->bbSetRunRarely();
- fgRemoveRefPred(block, prevBb);
- fgAddRefPred(tlsRootNullCondBB, prevBb);
- prevBb->SetTarget(tlsRootNullCondBB);
+ fgRemoveRefPred(prevBb->GetTargetEdge());
+ FlowEdge* const newEdge = fgAddRefPred(tlsRootNullCondBB, prevBb);
+ prevBb->SetTargetEdge(newEdge);
// All blocks are expected to be in the same EH region
assert(BasicBlock::sameEHRegion(prevBb, block));
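Each conditional block in these expansions now records both outgoing edges explicitly, as with tlsRootNullCondBB above. A toy version of the SetTrueEdge/SetFalseEdge pairing (illustrative types, not the JIT's):

    #include <cassert>

    struct Block
    {
    };

    struct Edge
    {
        Block* dest;
    };

    struct CondBlock
    {
        Edge* trueEdge  = nullptr;
        Edge* falseEdge = nullptr;

        void setEdges(Edge* t, Edge* f)
        {
            // A BBJ_COND-style block owns exactly two successor edges.
            assert((t != nullptr) && (f != nullptr));
            trueEdge  = t;
            falseEdge = f;
        }
    };

    int main()
    {
        Block fastPath, fallback;
        Edge  t{&fastPath};
        Edge  f{&fallback};

        CondBlock cond;
        cond.setEdges(&t, &f);
        assert(cond.trueEdge->dest == &fastPath);
        assert(cond.falseEdge->dest == &fallback);
        return 0;
    }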
@@ -1056,7 +1068,7 @@ bool Compiler::fgExpandThreadLocalAccessForCall(BasicBlock** pBlock, Statement*
// fallbackBb
GenTree* fallbackValueDef = gtNewStoreLclVarNode(threadStaticBlockLclNum, call);
BasicBlock* fallbackBb =
- fgNewBBFromTreeAfter(BBJ_ALWAYS, threadStaticBlockNullCondBB, fallbackValueDef, debugInfo, block, true);
+ fgNewBBFromTreeAfter(BBJ_ALWAYS, threadStaticBlockNullCondBB, fallbackValueDef, debugInfo, true);
// fastPathBb
if (isGCThreadStatic)
@@ -1071,32 +1083,42 @@ bool Compiler::fgExpandThreadLocalAccessForCall(BasicBlock** pBlock, Statement*
GenTree* fastPathValueDef =
gtNewStoreLclVarNode(threadStaticBlockLclNum, gtCloneExpr(threadStaticBlockBaseLclValueUse));
- BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, fastPathValueDef, debugInfo, block, true);
-
- // Set maxThreadStaticBlocksCondBB's jump targets
- maxThreadStaticBlocksCondBB->SetTrueTarget(fallbackBb);
- maxThreadStaticBlocksCondBB->SetFalseTarget(threadStaticBlockNullCondBB);
-
- // Set threadStaticBlockNullCondBB's jump targets
- threadStaticBlockNullCondBB->SetTrueTarget(fastPathBb);
- threadStaticBlockNullCondBB->SetFalseTarget(fallbackBb);
+ BasicBlock* fastPathBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, fastPathValueDef, debugInfo, true);
//
// Update preds in all new blocks
//
assert(prevBb->KindIs(BBJ_ALWAYS));
- prevBb->SetTarget(maxThreadStaticBlocksCondBB);
- fgRemoveRefPred(block, prevBb);
- fgAddRefPred(maxThreadStaticBlocksCondBB, prevBb);
+ fgRemoveRefPred(prevBb->GetTargetEdge());
+
+ {
+ FlowEdge* const newEdge = fgAddRefPred(maxThreadStaticBlocksCondBB, prevBb);
+ prevBb->SetTargetEdge(newEdge);
+ }
- fgAddRefPred(threadStaticBlockNullCondBB, maxThreadStaticBlocksCondBB);
- fgAddRefPred(fallbackBb, maxThreadStaticBlocksCondBB);
+ {
+ FlowEdge* const trueEdge = fgAddRefPred(fallbackBb, maxThreadStaticBlocksCondBB);
+ FlowEdge* const falseEdge = fgAddRefPred(threadStaticBlockNullCondBB, maxThreadStaticBlocksCondBB);
+ maxThreadStaticBlocksCondBB->SetTrueEdge(trueEdge);
+ maxThreadStaticBlocksCondBB->SetFalseEdge(falseEdge);
+ }
- fgAddRefPred(fastPathBb, threadStaticBlockNullCondBB);
- fgAddRefPred(fallbackBb, threadStaticBlockNullCondBB);
+ {
+ FlowEdge* const trueEdge = fgAddRefPred(fastPathBb, threadStaticBlockNullCondBB);
+ FlowEdge* const falseEdge = fgAddRefPred(fallbackBb, threadStaticBlockNullCondBB);
+ threadStaticBlockNullCondBB->SetTrueEdge(trueEdge);
+ threadStaticBlockNullCondBB->SetFalseEdge(falseEdge);
+ }
- fgAddRefPred(block, fastPathBb);
- fgAddRefPred(block, fallbackBb);
+ {
+ FlowEdge* const newEdge = fgAddRefPred(block, fastPathBb);
+ fastPathBb->SetTargetEdge(newEdge);
+ }
+
+ {
+ FlowEdge* const newEdge = fgAddRefPred(block, fallbackBb);
+ fallbackBb->SetTargetEdge(newEdge);
+ }
// Inherit the weights
block->inheritWeight(prevBb);
@@ -1376,14 +1398,12 @@ bool Compiler::fgExpandStaticInitForCall(BasicBlock** pBlock, Statement* stmt, G
GenTree* isInitedCmp = gtNewOperNode(GT_EQ, TYP_INT, isInitedActualValueNode, isInitedExpectedValue);
isInitedCmp->gtFlags |= GTF_RELOP_JMP_USED;
BasicBlock* isInitedBb =
- fgNewBBFromTreeAfter(BBJ_COND, prevBb, gtNewOperNode(GT_JTRUE, TYP_VOID, isInitedCmp), debugInfo, block);
+ fgNewBBFromTreeAfter(BBJ_COND, prevBb, gtNewOperNode(GT_JTRUE, TYP_VOID, isInitedCmp), debugInfo);
// Fallback basic block
// TODO-CQ: for JIT we can replace the original call with CORINFO_HELP_INITCLASS
// that only accepts a single argument
- BasicBlock* helperCallBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, isInitedBb, call, debugInfo, isInitedBb->Next(), true);
- assert(helperCallBb->JumpsToNext());
- helperCallBb->SetFlags(BBF_NONE_QUIRK);
+ BasicBlock* helperCallBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, isInitedBb, call, debugInfo, true);
GenTree* replacementNode = nullptr;
if (retValKind == SHRV_STATIC_BASE_PTR)
@@ -1443,22 +1463,32 @@ bool Compiler::fgExpandStaticInitForCall(BasicBlock** pBlock, Statement* stmt, G
//
// Unlink block and prevBb
- fgRemoveRefPred(block, prevBb);
+ fgRemoveRefPred(prevBb->GetTargetEdge());
- // Block has two preds now: either isInitedBb or helperCallBb
- fgAddRefPred(block, isInitedBb);
- fgAddRefPred(block, helperCallBb);
+ {
+ // Block has two preds now: either isInitedBb or helperCallBb
+ FlowEdge* const newEdge = fgAddRefPred(block, helperCallBb);
+ helperCallBb->SetTargetEdge(newEdge);
+ assert(helperCallBb->JumpsToNext());
+ helperCallBb->SetFlags(BBF_NONE_QUIRK);
+ }
- // prevBb always flows into isInitedBb
- assert(prevBb->KindIs(BBJ_ALWAYS));
- prevBb->SetTarget(isInitedBb);
- prevBb->SetFlags(BBF_NONE_QUIRK);
- assert(prevBb->JumpsToNext());
- fgAddRefPred(isInitedBb, prevBb);
+ {
+ // prevBb always flows into isInitedBb
+ assert(prevBb->KindIs(BBJ_ALWAYS));
+ FlowEdge* const newEdge = fgAddRefPred(isInitedBb, prevBb);
+ prevBb->SetTargetEdge(newEdge);
+ prevBb->SetFlags(BBF_NONE_QUIRK);
+ assert(prevBb->JumpsToNext());
+ }
- // Both fastPathBb and helperCallBb have a single common pred - isInitedBb
- isInitedBb->SetFalseTarget(helperCallBb);
- fgAddRefPred(helperCallBb, isInitedBb);
+ {
+ // Both fastPathBb and helperCallBb have a single common pred - isInitedBb
+ FlowEdge* const trueEdge = fgAddRefPred(block, isInitedBb);
+ FlowEdge* const falseEdge = fgAddRefPred(helperCallBb, isInitedBb);
+ isInitedBb->SetTrueEdge(trueEdge);
+ isInitedBb->SetFalseEdge(falseEdge);
+ }
//
// Re-distribute weights
@@ -1687,7 +1717,7 @@ bool Compiler::fgVNBasedIntrinsicExpansionForCall_ReadUtf8(BasicBlock** pBlock,
//
// Block 1: lengthCheckBb (we check that dstLen < srcLen)
//
- BasicBlock* lengthCheckBb = fgNewBBafter(BBJ_COND, prevBb, true, block);
+ BasicBlock* lengthCheckBb = fgNewBBafter(BBJ_COND, prevBb, true);
lengthCheckBb->SetFlags(BBF_INTERNAL);
// Set bytesWritten to -1 by default; if the fast path is not taken we'll return it as the result.
@@ -1709,9 +1739,8 @@ bool Compiler::fgVNBasedIntrinsicExpansionForCall_ReadUtf8(BasicBlock** pBlock,
// In theory, we could just emit the const U8 data to the data section and use GT_BLK here
// but that would be a bit less efficient since we would have to load the data from memory.
//
- BasicBlock* fastpathBb = fgNewBBafter(BBJ_ALWAYS, lengthCheckBb, true, lengthCheckBb->Next());
- assert(fastpathBb->JumpsToNext());
- fastpathBb->SetFlags(BBF_INTERNAL | BBF_NONE_QUIRK);
+ BasicBlock* fastpathBb = fgNewBBafter(BBJ_ALWAYS, lengthCheckBb, true);
+ fastpathBb->SetFlags(BBF_INTERNAL);
// The widest type we can use for loads
const var_types maxLoadType = roundDownMaxType(srcLenU8);
@@ -1764,19 +1793,32 @@ bool Compiler::fgVNBasedIntrinsicExpansionForCall_ReadUtf8(BasicBlock** pBlock,
// Update preds in all new blocks
//
// prevBb is no longer a predecessor of block
- fgRemoveRefPred(block, prevBb);
- // prevBb flows into lengthCheckBb
- assert(prevBb->KindIs(BBJ_ALWAYS));
- prevBb->SetTarget(lengthCheckBb);
- prevBb->SetFlags(BBF_NONE_QUIRK);
- assert(prevBb->JumpsToNext());
- fgAddRefPred(lengthCheckBb, prevBb);
- // lengthCheckBb has two successors: block and fastpathBb
- lengthCheckBb->SetFalseTarget(fastpathBb);
- fgAddRefPred(fastpathBb, lengthCheckBb);
- fgAddRefPred(block, lengthCheckBb);
- // fastpathBb flows into block
- fgAddRefPred(block, fastpathBb);
+ fgRemoveRefPred(prevBb->GetTargetEdge());
+
+ {
+ // prevBb flows into lengthCheckBb
+ assert(prevBb->KindIs(BBJ_ALWAYS));
+ FlowEdge* const newEdge = fgAddRefPred(lengthCheckBb, prevBb);
+ prevBb->SetTargetEdge(newEdge);
+ prevBb->SetFlags(BBF_NONE_QUIRK);
+ assert(prevBb->JumpsToNext());
+ }
+
+ {
+ // lengthCheckBb has two successors: block and fastpathBb
+ FlowEdge* const trueEdge = fgAddRefPred(block, lengthCheckBb);
+ FlowEdge* const falseEdge = fgAddRefPred(fastpathBb, lengthCheckBb);
+ lengthCheckBb->SetTrueEdge(trueEdge);
+ lengthCheckBb->SetFalseEdge(falseEdge);
+ }
+
+ {
+ // fastpathBb flows into block
+ FlowEdge* const newEdge = fgAddRefPred(block, fastpathBb);
+ fastpathBb->SetTargetEdge(newEdge);
+ assert(fastpathBb->JumpsToNext());
+ fastpathBb->SetFlags(BBF_NONE_QUIRK);
+ }
//
// Re-distribute weights
@@ -2344,8 +2386,8 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt,
// it's too late to rely on upstream phases to do this for us (unless we do optRepeat).
GenTree* nullcheckOp = gtNewOperNode(GT_EQ, TYP_INT, tmpNode, gtNewNull());
nullcheckOp->gtFlags |= GTF_RELOP_JMP_USED;
- BasicBlock* nullcheckBb = fgNewBBFromTreeAfter(BBJ_COND, firstBb, gtNewOperNode(GT_JTRUE, TYP_VOID, nullcheckOp),
- debugInfo, lastBb, true);
+ BasicBlock* nullcheckBb =
+ fgNewBBFromTreeAfter(BBJ_COND, firstBb, gtNewOperNode(GT_JTRUE, TYP_VOID, nullcheckOp), debugInfo, true);
// The very first statement in the whole expansion is to assign obj to tmp.
// We assume it's the value we're going to return in most cases.
@@ -2385,7 +2427,7 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt,
GenTree* mtCheck = gtNewOperNode(GT_EQ, TYP_INT, gtNewMethodTableLookup(gtCloneExpr(tmpNode)), expectedClsNode);
mtCheck->gtFlags |= GTF_RELOP_JMP_USED;
GenTree* jtrue = gtNewOperNode(GT_JTRUE, TYP_VOID, mtCheck);
- typeChecksBbs[candidateId] = fgNewBBFromTreeAfter(BBJ_COND, lastTypeCheckBb, jtrue, debugInfo, lastBb, true);
+ typeChecksBbs[candidateId] = fgNewBBFromTreeAfter(BBJ_COND, lastTypeCheckBb, jtrue, debugInfo, true);
lastTypeCheckBb = typeChecksBbs[candidateId];
// Insert the CSE node as the first statement in the block
@@ -2407,13 +2449,13 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt,
{
// fallback call is used only to throw InvalidCastException
call->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN;
- fallbackBb = fgNewBBFromTreeAfter(BBJ_THROW, lastTypeCheckBb, call, debugInfo, nullptr, true);
+ fallbackBb = fgNewBBFromTreeAfter(BBJ_THROW, lastTypeCheckBb, call, debugInfo, true);
}
else if (typeCheckFailedAction == TypeCheckFailedAction::ReturnNull)
{
// if fallback call is not needed, we just assign null to tmp
GenTree* fallbackTree = gtNewTempStore(tmpNum, gtNewNull());
- fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, lastTypeCheckBb, fallbackTree, debugInfo, lastBb, true);
+ fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, lastTypeCheckBb, fallbackTree, debugInfo, true);
}
else
{
@@ -2424,7 +2466,7 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt,
call->gtCallMethHnd = eeFindHelper(CORINFO_HELP_CHKCASTCLASS_SPECIAL);
}
GenTree* fallbackTree = gtNewTempStore(tmpNum, call);
- fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, lastTypeCheckBb, fallbackTree, debugInfo, lastBb, true);
+ fallbackBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, lastTypeCheckBb, fallbackTree, debugInfo, true);
}
// Block 4: typeCheckSucceedBb
@@ -2439,15 +2481,11 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt,
typeCheckSucceedTree = gtNewNothingNode();
}
BasicBlock* typeCheckSucceedBb =
- typeCheckNotNeeded ? nullptr
- : fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, typeCheckSucceedTree, debugInfo, lastBb);
+ typeCheckNotNeeded ? nullptr : fgNewBBFromTreeAfter(BBJ_ALWAYS, fallbackBb, typeCheckSucceedTree, debugInfo);
//
// Wire up the blocks
//
- firstBb->SetTarget(nullcheckBb);
- nullcheckBb->SetTrueTarget(lastBb);
- nullcheckBb->SetFalseTarget(typeCheckNotNeeded ? fallbackBb : typeChecksBbs[0]);
// Tricky case - wire up multiple type check blocks (in most cases there is only one)
for (int candidateId = 0; candidateId < numOfCandidates; candidateId++)
@@ -2455,41 +2493,48 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt,
BasicBlock* curTypeCheckBb = typeChecksBbs[candidateId];
// All type checks jump straight to the typeCheckSucceedBb on success
- curTypeCheckBb->SetTrueTarget(typeCheckSucceedBb);
- fgAddRefPred(typeCheckSucceedBb, curTypeCheckBb);
+ FlowEdge* const trueEdge = fgAddRefPred(typeCheckSucceedBb, curTypeCheckBb);
+ curTypeCheckBb->SetTrueEdge(trueEdge);
// or ...
if (candidateId == numOfCandidates - 1)
{
// ... jump to the fallbackBb on last type check's failure
- curTypeCheckBb->SetFalseTarget(fallbackBb);
- fgAddRefPred(fallbackBb, curTypeCheckBb);
+ FlowEdge* const falseEdge = fgAddRefPred(fallbackBb, curTypeCheckBb);
+ curTypeCheckBb->SetFalseEdge(falseEdge);
}
else
{
// ... jump to the next type check on failure
- curTypeCheckBb->SetFalseTarget(typeChecksBbs[candidateId + 1]);
- fgAddRefPred(typeChecksBbs[candidateId + 1], curTypeCheckBb);
+ FlowEdge* const falseEdge = fgAddRefPred(typeChecksBbs[candidateId + 1], curTypeCheckBb);
+ curTypeCheckBb->SetFalseEdge(falseEdge);
}
}
- fgRemoveRefPred(lastBb, firstBb);
- fgAddRefPred(nullcheckBb, firstBb);
- fgAddRefPred(lastBb, nullcheckBb);
- if (typeCheckNotNeeded)
+ fgRemoveRefPred(firstBb->GetTargetEdge());
+
{
- fgAddRefPred(fallbackBb, nullcheckBb);
+ FlowEdge* const newEdge = fgAddRefPred(nullcheckBb, firstBb);
+ firstBb->SetTargetEdge(newEdge);
}
- else
+
{
- fgAddRefPred(typeChecksBbs[0], nullcheckBb);
- fgAddRefPred(lastBb, typeCheckSucceedBb);
+ FlowEdge* const trueEdge = fgAddRefPred(lastBb, nullcheckBb);
+ nullcheckBb->SetTrueEdge(trueEdge);
}
- if (!fallbackBb->KindIs(BBJ_THROW))
+ if (typeCheckNotNeeded)
+ {
+ FlowEdge* const falseEdge = fgAddRefPred(fallbackBb, nullcheckBb);
+ nullcheckBb->SetFalseEdge(falseEdge);
+ }
+ else
{
- // if fallbackBb is BBJ_THROW then it has no successors
- fgAddRefPred(lastBb, fallbackBb);
+ FlowEdge* const falseEdge = fgAddRefPred(typeChecksBbs[0], nullcheckBb);
+ nullcheckBb->SetFalseEdge(falseEdge);
+
+ FlowEdge* const newEdge = fgAddRefPred(lastBb, typeCheckSucceedBb);
+ typeCheckSucceedBb->SetTargetEdge(newEdge);
}
//
@@ -2521,12 +2566,18 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt,
}
else
{
+ assert(fallbackBb->KindIs(BBJ_ALWAYS));
+ FlowEdge* const newEdge = fgAddRefPred(lastBb, fallbackBb);
+ fallbackBb->SetTargetEdge(newEdge);
+
fallbackBb->inheritWeightPercentage(lastTypeCheckBb, 100 - totalLikelihood);
}
+
if (!typeCheckNotNeeded)
{
typeCheckSucceedBb->inheritWeightPercentage(typeChecksBbs[0], totalLikelihood);
}
+
lastBb->inheritWeight(firstBb);
//
@@ -2537,12 +2588,12 @@ bool Compiler::fgLateCastExpansionForCall(BasicBlock** pBlock, Statement* stmt,
assert(BasicBlock::sameEHRegion(firstBb, fallbackBb));
// call guarantees that obj is never null, we can drop the nullcheck
- // by converting it to a BBJ_ALWAYS to typeCheckBb.
+ // by converting it to a BBJ_ALWAYS to its false target.
if ((call->gtCallMoreFlags & GTF_CALL_M_CAST_OBJ_NONNULL) != 0)
{
fgRemoveStmt(nullcheckBb, nullcheckBb->lastStmt());
- nullcheckBb->SetKindAndTarget(BBJ_ALWAYS, typeCheckNotNeeded ? fallbackBb : typeChecksBbs[0]);
- fgRemoveRefPred(lastBb, nullcheckBb);
+ fgRemoveRefPred(nullcheckBb->GetTrueEdge());
+ nullcheckBb->SetKindAndTargetEdge(BBJ_ALWAYS, nullcheckBb->GetFalseEdge());
}
// Bonus step: merge prevBb with nullcheckBb as they are likely to be mergeable
diff --git a/src/coreclr/jit/ifconversion.cpp b/src/coreclr/jit/ifconversion.cpp
index f9cb5af17925..1e6a573aa7b0 100644
--- a/src/coreclr/jit/ifconversion.cpp
+++ b/src/coreclr/jit/ifconversion.cpp
@@ -739,7 +739,7 @@ bool OptIfConversionDsc::optIfConvert()
// Update the flow from the original block.
m_comp->fgRemoveAllRefPreds(m_startBlock->GetFalseTarget(), m_startBlock);
- m_startBlock->SetKindAndTarget(BBJ_ALWAYS, m_startBlock->GetTrueTarget());
+ m_startBlock->SetKindAndTargetEdge(BBJ_ALWAYS, m_startBlock->GetTrueEdge());
#ifdef DEBUG
if (m_comp->verbose)
diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index 5469904c4ce5..72f301c63fda 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -2020,13 +2020,14 @@ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_H
{
// Create extra basic block for the spill
//
- BasicBlock* newBlk = fgNewBBbefore(BBJ_ALWAYS, hndBlk, /* extendRegion */ true, /* jumpDest */ hndBlk);
+ BasicBlock* newBlk = fgNewBBbefore(BBJ_ALWAYS, hndBlk, /* extendRegion */ true);
newBlk->SetFlags(BBF_IMPORTED | BBF_DONT_REMOVE | BBF_NONE_QUIRK);
newBlk->inheritWeight(hndBlk);
newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
FlowEdge* const newEdge = fgAddRefPred(hndBlk, newBlk);
newEdge->setLikelihood(1.0);
+ newBlk->SetTargetEdge(newEdge);
// Spill into a temp.
unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
@@ -2493,7 +2494,7 @@ GenTree* Compiler::impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom)
void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
{
- block->SetKindAndTarget(BBJ_THROW);
+ block->SetKindAndTargetEdge(BBJ_THROW);
block->SetFlags(BBF_FAILED_VERIFICATION);
block->RemoveFlags(BBF_IMPORTED);
@@ -4404,10 +4405,9 @@ void Compiler::impImportLeave(BasicBlock* block)
callBlock = block;
assert(callBlock->HasInitializedTarget());
- fgRemoveRefPred(callBlock->GetTarget(), callBlock);
+ fgRemoveRefPred(callBlock->GetTargetEdge());
- // callBlock will call the finally handler. Convert the BBJ_LEAVE to BBJ_CALLFINALLY.
- callBlock->SetKindAndTarget(BBJ_CALLFINALLY, HBtab->ebdHndBeg);
+ // callBlock will call the finally handler. This will be set up later.
if (endCatches)
{
@@ -4429,16 +4429,16 @@ void Compiler::impImportLeave(BasicBlock* block)
// Calling the finally block.
- // callBlock will call the finally handler
- callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step, HBtab->ebdHndBeg);
+ // callBlock will call the finally handler. This will be set up later.
+ callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
// step's jump target shouldn't be set yet
assert(!step->HasInitializedTarget());
// the previous call to a finally returns to this call (to the next finally in the chain)
- step->SetTarget(callBlock);
FlowEdge* const newEdge = fgAddRefPred(callBlock, step);
newEdge->setLikelihood(1.0);
+ step->SetTargetEdge(newEdge);
// The new block will inherit this block's weight.
callBlock->inheritWeight(block);
@@ -4486,10 +4486,9 @@ void Compiler::impImportLeave(BasicBlock* block)
unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
assert(finallyNesting <= compHndBBtabCount);
- assert(callBlock->KindIs(BBJ_CALLFINALLY));
- assert(callBlock->TargetIs(HBtab->ebdHndBeg));
- FlowEdge* const newEdge = fgAddRefPred(callBlock->GetTarget(), callBlock);
+ FlowEdge* const newEdge = fgAddRefPred(HBtab->ebdHndBeg, callBlock);
newEdge->setLikelihood(1.0);
+ callBlock->SetKindAndTargetEdge(BBJ_CALLFINALLY, newEdge);
GenTree* endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
endLFinStmt = gtNewStmt(endLFin);
@@ -4532,16 +4531,16 @@ void Compiler::impImportLeave(BasicBlock* block)
// Insert a new BB either in the try region indicated by tryIndex or
// the handler region indicated by leaveTarget->bbHndIndex,
// depending on which is the inner region.
- BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step, leaveTarget);
+ BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
finalStep->SetFlags(BBF_KEEP_BBJ_ALWAYS);
// step's jump target shouldn't be set yet
assert(!step->HasInitializedTarget());
- step->SetTarget(finalStep);
{
FlowEdge* const newEdge = fgAddRefPred(finalStep, step);
newEdge->setLikelihood(1.0);
+ step->SetTargetEdge(newEdge);
}
// The new block will inherit this block's weight.
@@ -4574,6 +4573,7 @@ void Compiler::impImportLeave(BasicBlock* block)
{
FlowEdge* const newEdge = fgAddRefPred(leaveTarget, finalStep);
newEdge->setLikelihood(1.0);
+ finalStep->SetTargetEdge(newEdge);
}
// Queue up the jump target for importing
@@ -4688,12 +4688,13 @@ void Compiler::impImportLeave(BasicBlock* block)
assert((step == block) || !step->HasInitializedTarget());
if (step == block)
{
- fgRemoveRefPred(step->GetTarget(), step);
+ fgRemoveRefPred(step->GetTargetEdge());
}
- step->SetTarget(exitBlock); // the previous step (maybe a call to a nested finally, or a nested catch
- // exit) returns to this block
+
FlowEdge* const newEdge = fgAddRefPred(exitBlock, step);
newEdge->setLikelihood(1.0);
+ step->SetTargetEdge(newEdge); // the previous step (maybe a call to a nested finally, or a nested catch
+ // exit) returns to this block
// The new block will inherit this block's weight.
exitBlock->inheritWeight(block);
@@ -4728,17 +4729,16 @@ void Compiler::impImportLeave(BasicBlock* block)
(HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
unsigned callFinallyHndIndex =
(HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
- callBlock =
- fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block, HBtab->ebdHndBeg);
+ callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
// Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
// the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
// which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
// next block, and flow optimizations will remove it.
- fgRemoveRefPred(block->GetTarget(), block);
- block->SetKindAndTarget(BBJ_ALWAYS, callBlock);
+ fgRemoveRefPred(block->GetTargetEdge());
FlowEdge* const newEdge = fgAddRefPred(callBlock, block);
newEdge->setLikelihood(1.0);
+ block->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
// The new block will inherit this block's weight.
callBlock->inheritWeight(block);
@@ -4758,10 +4758,9 @@ void Compiler::impImportLeave(BasicBlock* block)
callBlock = block;
assert(callBlock->HasInitializedTarget());
- fgRemoveRefPred(callBlock->GetTarget(), callBlock);
+ fgRemoveRefPred(callBlock->GetTargetEdge());
- // callBlock will call the finally handler. Convert the BBJ_LEAVE to BBJ_CALLFINALLY
- callBlock->SetKindAndTarget(BBJ_CALLFINALLY, HBtab->ebdHndBeg);
+ // callBlock will call the finally handler. This will be set up later.
#ifdef DEBUG
if (verbose)
@@ -4804,11 +4803,12 @@ void Compiler::impImportLeave(BasicBlock* block)
BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
if (step == block)
{
- fgRemoveRefPred(step->GetTarget(), step);
+ fgRemoveRefPred(step->GetTargetEdge());
}
- step->SetTarget(step2);
+
FlowEdge* const newEdge = fgAddRefPred(step2, step);
newEdge->setLikelihood(1.0);
+ step->SetTargetEdge(newEdge);
step2->inheritWeight(block);
step2->CopyFlags(block, BBF_RUN_RARELY);
step2->SetFlags(BBF_IMPORTED);
@@ -4841,16 +4841,16 @@ void Compiler::impImportLeave(BasicBlock* block)
assert((step == block) || !step->HasInitializedTarget());
// callBlock will call the finally handler
- callBlock =
- fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step, HBtab->ebdHndBeg);
+ callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
if (step == block)
{
- fgRemoveRefPred(step->GetTarget(), step);
+ fgRemoveRefPred(step->GetTargetEdge());
}
- step->SetTarget(callBlock); // the previous call to a finally returns to this call (to the next
- // finally in the chain)
+
FlowEdge* const newEdge = fgAddRefPred(callBlock, step);
newEdge->setLikelihood(1.0);
+ step->SetTargetEdge(newEdge); // the previous call to a finally returns to this call (to the next
+ // finally in the chain)
// The new block will inherit this block's weight.
callBlock->inheritWeight(block);
@@ -4884,10 +4884,9 @@ void Compiler::impImportLeave(BasicBlock* block)
}
#endif
- assert(callBlock->KindIs(BBJ_CALLFINALLY));
- assert(callBlock->TargetIs(HBtab->ebdHndBeg));
- FlowEdge* const newEdge = fgAddRefPred(callBlock->GetTarget(), callBlock);
+ FlowEdge* const newEdge = fgAddRefPred(HBtab->ebdHndBeg, callBlock);
newEdge->setLikelihood(1.0);
+ callBlock->SetKindAndTargetEdge(BBJ_CALLFINALLY, newEdge);
}
else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
!jitIsBetween(jmpAddr, tryBeg, tryEnd))
@@ -4951,11 +4950,12 @@ void Compiler::impImportLeave(BasicBlock* block)
if (step == block)
{
- fgRemoveRefPred(step->GetTarget(), step);
+ fgRemoveRefPred(step->GetTargetEdge());
}
- step->SetTarget(catchStep);
+
FlowEdge* const newEdge = fgAddRefPred(catchStep, step);
newEdge->setLikelihood(1.0);
+ step->SetTargetEdge(newEdge);
// The new block will inherit this block's weight.
catchStep->inheritWeight(block);
@@ -5006,11 +5006,11 @@ void Compiler::impImportLeave(BasicBlock* block)
if (step == block)
{
- fgRemoveRefPred(step->GetTarget(), step);
+ fgRemoveRefPred(step->GetTargetEdge());
}
- step->SetTarget(leaveTarget); // this is the ultimate destination of the LEAVE
FlowEdge* const newEdge = fgAddRefPred(leaveTarget, step);
newEdge->setLikelihood(1.0);
+ step->SetTargetEdge(newEdge); // this is the ultimate destination of the LEAVE
#ifdef DEBUG
if (verbose)
@@ -5069,10 +5069,11 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
// will be treated as pair and handled correctly.
if (block->KindIs(BBJ_CALLFINALLY))
{
- BasicBlock* dupBlock = BasicBlock::New(this, BBJ_CALLFINALLY, block->GetTarget());
+ BasicBlock* dupBlock = BasicBlock::New(this);
dupBlock->CopyFlags(block);
- FlowEdge* const newEdge = fgAddRefPred(dupBlock->GetTarget(), dupBlock);
+ FlowEdge* const newEdge = fgAddRefPred(block->GetTarget(), dupBlock);
newEdge->setLikelihood(1.0);
+ dupBlock->SetKindAndTargetEdge(BBJ_CALLFINALLY, newEdge);
dupBlock->copyEHRegion(block);
dupBlock->bbCatchTyp = block->bbCatchTyp;
@@ -5101,10 +5102,10 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
fgInitBBLookup();
- fgRemoveRefPred(block->GetTarget(), block);
- block->SetKindAndTarget(BBJ_LEAVE, fgLookupBB(jmpAddr));
- FlowEdge* const newEdge = fgAddRefPred(block->GetTarget(), block);
+ fgRemoveRefPred(block->GetTargetEdge());
+ FlowEdge* const newEdge = fgAddRefPred(fgLookupBB(jmpAddr), block);
newEdge->setLikelihood(1.0);
+ block->SetKindAndTargetEdge(BBJ_LEAVE, newEdge);
// We will leave the BBJ_ALWAYS block we introduced. When it's reimported
// the BBJ_ALWAYS block will be unreachable, and will be removed after. The
@@ -5427,8 +5428,7 @@ GenTree* Compiler::impCastClassOrIsInstToTree(
assert(op1->TypeGet() == TYP_REF);
// Optimistically assume the jit should expand this as an inline test
- bool shouldExpandInline = true;
- bool isClassExact = info.compCompHnd->isExactType(pResolvedToken->hClass);
+ bool isClassExact = info.compCompHnd->isExactType(pResolvedToken->hClass);
// ECMA-335 III.4.3: If typeTok is a nullable type, Nullable<T>, it is interpreted as "boxed" T
// We can convert constant-ish tokens of nullable to its underlying type.
@@ -5437,7 +5437,6 @@ GenTree* Compiler::impCastClassOrIsInstToTree(
if (isClassExact && !(info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_SHAREDINST))
{
CORINFO_CLASS_HANDLE hClass = info.compCompHnd->getTypeForBox(pResolvedToken->hClass);
-
if (hClass != pResolvedToken->hClass)
{
bool runtimeLookup;
@@ -5447,101 +5446,34 @@ GenTree* Compiler::impCastClassOrIsInstToTree(
}
}
- // Profitability check.
- //
- // Don't bother with inline expansion when jit is trying to generate code quickly
- if (opts.OptimizationDisabled())
- {
- // not worth the code expansion if jitting fast or in a rarely run block
- shouldExpandInline = false;
- }
- else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
- {
- // not worth creating an untracked local variable
- shouldExpandInline = false;
- }
- else if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) && (JitConfig.JitProfileCasts() == 1))
- {
- // Optimizations are enabled but we're still instrumenting (including casts)
- if (isCastClass && !isClassExact)
- {
- // Usually, we make a speculative assumption that it makes sense to expand castclass
- // even for non-sealed classes, but let's rely on PGO in this specific case
- shouldExpandInline = false;
- }
- }
-
- if (shouldExpandInline && compCurBB->isRunRarely())
- {
- // For cold blocks we only expand castclass against exact classes because it's cheap
- shouldExpandInline = isCastClass && isClassExact;
- }
-
- // Pessimistically assume the jit cannot expand this as an inline test
- bool canExpandInline = false;
- bool reversedMTCheck = false;
- const CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
-
- CORINFO_CLASS_HANDLE exactCls = NO_CLASS_HANDLE;
+ const CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
- // By default, we assume it's 50/50 with the slow path.
- unsigned fastPathLikelihood = 50;
-
- // Legality check.
- //
- // Not all classclass/isinst operations can be inline expanded.
- // Check legality only if an inline expansion is desirable.
- if (shouldExpandInline)
+ bool shouldExpandEarly = false;
+ const bool tooManyLocals = (((op1->gtFlags & GTF_GLOB_EFFECT) != 0) && lvaHaveManyLocals());
+ if (isClassExact && opts.OptimizationEnabled() && !compCurBB->isRunRarely() && !tooManyLocals)
{
- if (isCastClass)
+ // TODO-InlineCast: Fix size regressions for these two cases if they're moved to the
+ // late cast expansion path and remove this early expansion entirely.
+ if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
{
- // Jit can only inline expand CHKCASTCLASS and CHKCASTARRAY helpers.
- canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTARRAY);
-
- // For ChkCastAny we ignore cases where the class is known to be abstract or is an interface.
- if (helper == CORINFO_HELP_CHKCASTANY)
- {
- const bool isAbstract = (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) &
- (CORINFO_FLG_INTERFACE | CORINFO_FLG_ABSTRACT)) != 0;
- canExpandInline = !isAbstract;
- }
+ shouldExpandEarly = true;
}
- else if ((helper == CORINFO_HELP_ISINSTANCEOFCLASS) || (helper == CORINFO_HELP_ISINSTANCEOFARRAY))
+ else if (helper == CORINFO_HELP_ISINSTANCEOFARRAY && !op2->IsIconHandle(GTF_ICON_CLASS_HDL))
{
- // If the class is exact, the jit can expand the IsInst check inline.
- canExpandInline = isClassExact;
+ shouldExpandEarly = true;
}
}
- bool expandInline = canExpandInline && shouldExpandInline;
-
- if ((helper == CORINFO_HELP_ISINSTANCEOFCLASS) && isClassExact)
- {
- // TODO-InlineCast: isinst against exact class
- // It's already supported by the late cast expansion phase, but
- // produces unexpected size regressions in some cases.
- }
- else if (!isCastClass && !op2->IsIconHandle(GTF_ICON_CLASS_HDL))
- {
- // TODO-InlineCast: isinst against Class<_Canon>
- }
- else
- {
- // Expand later
- expandInline = false;
- }
-
- if (!expandInline)
+ if (!shouldExpandEarly)
{
- JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst",
- canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
+ JITDUMP("\nImporting %s as call because %s\n", isCastClass ? "castclass" : "isinst");
// If we CSE this class handle we prevent assertionProp from making SubType assertions
// so instead we force the CSE logic to not consider CSE-ing this class handle.
//
op2->gtFlags |= GTF_DONT_CSE;
-
- GenTreeCall* call = gtNewHelperCallNode(helper, TYP_REF, op2, op1);
+ GenTreeCall* call = gtNewHelperCallNode(helper, TYP_REF, op2, op1);
+ call->gtCastHelperILOffset = ilOffset;
// Instrument this castclass/isinst
if ((JitConfig.JitClassProfiling() > 0) && impIsCastHelperEligibleForClassProbe(call) && !isClassExact &&
@@ -5567,128 +5499,41 @@ GenTree* Compiler::impCastClassOrIsInstToTree(
return call;
}
- JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst");
-
- impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
-
- GenTree* temp;
- GenTree* condMT;
- //
- // expand the methodtable match:
- //
- // condMT ==> GT_NE
- // / \.
- // GT_IND op2 (typically CNS_INT)
- // |
- // op1Copy
- //
-
- // This can replace op1 with a GT_COMMA that evaluates op1 into a local
- //
- op1 = impCloneExpr(op1, &temp, CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
- //
- // op1 is now known to be a non-complex tree
- // thus we can use gtClone(op1) from now on
- //
-
- GenTree* op2Var = op2;
- if (isCastClass && (exactCls == NO_CLASS_HANDLE))
- {
- // if exactCls is not null we won't have to clone op2 (it will be used only for the fallback)
- op2Var = fgInsertCommaFormTemp(&op2);
- lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
- }
- temp = gtNewMethodTableLookup(temp);
- condMT =
- gtNewOperNode(GT_NE, TYP_INT, temp, (exactCls != NO_CLASS_HANDLE) ? gtNewIconEmbClsHndNode(exactCls) : op2);
-
- GenTree* condNull;
- //
- // expand the null check:
- //
- // condNull ==> GT_EQ
- // / \.
- // op1Copy CNS_INT
- // null
- //
- condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewNull());
-
- //
- // expand the true and false trees for the condMT
- //
- GenTree* condFalse = reversedMTCheck ? gtNewNull() : gtClone(op1);
- GenTree* condTrue;
- if (isCastClass)
- {
- assert((helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTARRAY) ||
- (helper == CORINFO_HELP_CHKCASTANY) || (helper == CORINFO_HELP_CHKCASTINTERFACE));
+ JITDUMP("\nExpanding isinst inline\n");
- CorInfoHelpFunc specialHelper = helper;
- if ((helper == CORINFO_HELP_CHKCASTCLASS) &&
- ((exactCls == nullptr) || (exactCls == gtGetHelperArgClassHandle(op2))))
- {
- // use the special helper that skips the cases checked by our inlined cast
- specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
- }
- condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, op2Var, gtClone(op1));
- }
- else
- {
- condTrue = gtNewIconNode(0, TYP_REF);
- }
+ impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
- GenTreeQmark* qmarkMT;
- //
- // Generate first QMARK - COLON tree
+ // Now we import it as two QMark nodes representing this:
//
- // qmarkMT ==> GT_QMARK
- // / \.
- // condMT GT_COLON
- // / \.
- // condFalse condTrue
+ // tmp = op1;
+ // if (tmp != null) // qmarkNull
+ // {
+ // if (tmp->pMT == op2) // qmarkMT
+ // result = tmp;
+ // else
+ // result = null;
+ // }
+ // else
+ // result = null;
//
- temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
- qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp->AsColon());
- qmarkMT->SetThenNodeLikelihood(fastPathLikelihood);
-
- if (isCastClass && isClassExact && condTrue->OperIs(GT_CALL))
- {
- if (helper == CORINFO_HELP_CHKCASTCLASS)
- {
- // condTrue is used only for throwing InvalidCastException in case of casting to an exact class.
- condTrue->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN;
- // defer calling setMethodHasNoReturnCalls until qmark expasion
- }
- }
+ // Spill op1 if it's a complex expression
+ GenTree* op1Clone;
+ op1 = impCloneExpr(op1, &op1Clone, CHECK_SPILL_ALL, nullptr DEBUGARG("ISINST eval op1"));
- GenTree* qmarkNull;
- //
- // Generate second QMARK - COLON tree
- //
- // qmarkNull ==> GT_QMARK
- // / \.
- // condNull GT_COLON
- // / \.
- // qmarkMT op1Copy
- //
- temp = new (this, GT_COLON) GenTreeColon(TYP_REF, reversedMTCheck ? gtNewNull() : gtClone(op1), qmarkMT);
- qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp->AsColon());
+ GenTreeOp* condMT = gtNewOperNode(GT_NE, TYP_INT, gtNewMethodTableLookup(op1Clone), op2);
+ GenTreeOp* condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewNull());
+ GenTreeQmark* qmarkMT = gtNewQmarkNode(TYP_REF, condMT, gtNewColonNode(TYP_REF, gtNewNull(), gtClone(op1)));
+ GenTreeQmark* qmarkNull = gtNewQmarkNode(TYP_REF, condNull, gtNewColonNode(TYP_REF, gtNewNull(), qmarkMT));
// Make QMark node a top level node by spilling it.
- unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
- impStoreTemp(tmp, qmarkNull, CHECK_SPILL_NONE);
+ const unsigned result = lvaGrabTemp(true DEBUGARG("spilling qmarkNull"));
+ impStoreTemp(result, qmarkNull, CHECK_SPILL_NONE);
- // TODO-CQ: Is it possible op1 has a better type?
- //
// See also gtGetHelperCallClassHandle where we make the same
// determination for the helper call variants.
- LclVarDsc* lclDsc = lvaGetDesc(tmp);
- assert(lclDsc->lvSingleDef == 0);
- lclDsc->lvSingleDef = 1;
- JITDUMP("Marked V%02u as a single def temp\n", tmp);
- lvaSetClass(tmp, pResolvedToken->hClass);
- return gtNewLclvNode(tmp, TYP_REF);
+ lvaSetClass(result, pResolvedToken->hClass);
+ return gtNewLclvNode(result, TYP_REF);
}
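The nested QMark shape built above evaluates like the following standalone C++ sketch (hypothetical Object/MethodTable stand-ins; the JIT of course operates on IR, not on these types):

    #include <cstdio>

    struct MethodTable { };
    struct Object { MethodTable* pMT; };

    // result = (tmp != null) ? ((tmp->pMT == expected) ? tmp : null) : null
    static Object* IsInstExact(Object* obj, MethodTable* expected)
    {
        Object* tmp = obj;  // op1 is cloned into a temp (impCloneExpr)
        if (tmp == nullptr) // condNull: the outer qmarkNull
        {
            return nullptr;
        }
        // condMT: the inner qmarkMT compares the method table against op2
        return (tmp->pMT == expected) ? tmp : nullptr;
    }

    int main()
    {
        MethodTable mt;
        Object obj{&mt};
        std::printf("%d %d\n", IsInstExact(&obj, &mt) != nullptr,
                    IsInstExact(nullptr, &mt) != nullptr); // prints "1 0"
        return 0;
    }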
#ifndef DEBUG
@@ -6021,7 +5866,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
// Change block to BBJ_THROW so we won't trigger importation of successors.
//
- block->SetKindAndTarget(BBJ_THROW);
+ block->SetKindAndTargetEdge(BBJ_THROW);
// If this method has a explicit generic context, the only uses of it may be in
// the IL for this block. So assume it's used.
@@ -7337,14 +7182,14 @@ void Compiler::impImportBlockCode(BasicBlock* block)
// We may have already modified `block`'s jump kind, if this is a re-importation.
//
bool jumpToNextOptimization = false;
- if (block->KindIs(BBJ_COND) && block->TrueTargetIs(block->GetFalseTarget()))
+ if (block->KindIs(BBJ_COND) && block->TrueEdgeIs(block->GetFalseEdge()))
{
JITDUMP(FMT_BB " always branches to " FMT_BB ", changing to BBJ_ALWAYS\n", block->bbNum,
block->GetFalseTarget()->bbNum);
- fgRemoveRefPred(block->GetFalseTarget(), block);
+ fgRemoveRefPred(block->GetFalseEdge());
block->SetKind(BBJ_ALWAYS);
- // TODO-NoFallThrough: Once bbFalseTarget can diverge from bbNext, it may not make sense to
+ // TODO-NoFallThrough: Once false target can diverge from bbNext, it may not make sense to
// set BBF_NONE_QUIRK
block->SetFlags(BBF_NONE_QUIRK);
@@ -7416,21 +7261,24 @@ void Compiler::impImportBlockCode(BasicBlock* block)
{
JITDUMP("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n",
block->GetTrueTarget()->bbNum);
- fgRemoveRefPred(block->GetFalseTarget(), block);
+ fgRemoveRefPred(block->GetFalseEdge());
block->SetKind(BBJ_ALWAYS);
}
else
{
- // TODO-NoFallThrough: Update once bbFalseTarget can diverge from bbNext
+ // TODO-NoFallThrough: Update once false target can diverge from bbNext
assert(block->NextIs(block->GetFalseTarget()));
JITDUMP("\nThe block jumps to the next " FMT_BB "\n", block->Next()->bbNum);
- fgRemoveRefPred(block->GetTrueTarget(), block);
- block->SetKindAndTarget(BBJ_ALWAYS, block->Next());
+ fgRemoveRefPred(block->GetTrueEdge());
+ block->SetKindAndTargetEdge(BBJ_ALWAYS, block->GetFalseEdge());
- // TODO-NoFallThrough: Once bbFalseTarget can diverge from bbNext, it may not make sense
+ // TODO-NoFallThrough: Once false target can diverge from bbNext, it may not make sense
// to set BBF_NONE_QUIRK
block->SetFlags(BBF_NONE_QUIRK);
}
+
+ FlowEdge* const edge = fgGetPredForBlock(block->GetTarget(), block);
+ edge->setLikelihood(1.0);
}
break;
@@ -7599,14 +7447,14 @@ void Compiler::impImportBlockCode(BasicBlock* block)
// We may have already modified `block`'s jump kind, if this is a re-importation.
//
bool jumpToNextOptimization = false;
- if (block->KindIs(BBJ_COND) && block->TrueTargetIs(block->GetFalseTarget()))
+ if (block->KindIs(BBJ_COND) && block->TrueEdgeIs(block->GetFalseEdge()))
{
JITDUMP(FMT_BB " always branches to " FMT_BB ", changing to BBJ_ALWAYS\n", block->bbNum,
block->GetFalseTarget()->bbNum);
- fgRemoveRefPred(block->GetFalseTarget(), block);
+ fgRemoveRefPred(block->GetFalseEdge());
block->SetKind(BBJ_ALWAYS);
- // TODO-NoFallThrough: Once bbFalseTarget can diverge from bbNext, it may not make sense to
+ // TODO-NoFallThrough: Once false target can diverge from bbNext, it may not make sense to
// set BBF_NONE_QUIRK
block->SetFlags(BBF_NONE_QUIRK);
@@ -7691,7 +7539,8 @@ void Compiler::impImportBlockCode(BasicBlock* block)
if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1)))
{
// transform the basic block into a BBJ_ALWAYS
- block->SetKindAndTarget(BBJ_ALWAYS, curEdge->getDestinationBlock());
+ block->SetKindAndTargetEdge(BBJ_ALWAYS, curEdge);
+ curEdge->setLikelihood(1.0);
foundVal = true;
}
else
@@ -10295,9 +10144,9 @@ void Compiler::impImportBlockCode(BasicBlock* block)
}
#endif
- op3 = impPopStack().val; // Size
- op2 = impPopStack().val; // Value / Src addr
- op1 = impPopStack().val; // Dst addr
+ op3 = gtFoldExpr(impPopStack().val); // Size
+ op2 = gtFoldExpr(impPopStack().val); // Value / Src addr
+ op1 = impPopStack().val; // Dst addr
if (op3->IsCnsIntOrI())
{
@@ -10340,36 +10189,35 @@ void Compiler::impImportBlockCode(BasicBlock* block)
op3 = gtNewCastNode(TYP_LONG, op3, /* fromUnsigned */ true, TYP_LONG);
}
-// TODO: enable for X86 as well, it currently doesn't support memset/memcpy helpers
-// Then, get rid of GT_STORE_DYN_BLK entirely.
-#ifndef TARGET_X86
- const unsigned helper = opcode == CEE_INITBLK ? CORINFO_HELP_MEMSET : CORINFO_HELP_MEMCPY;
- if (isVolatile)
+ GenTreeCall* call;
+ if (opcode == CEE_INITBLK)
{
- // Wrap with memory barriers: full-barrier + call + load-barrier
- impAppendTree(gtNewMemoryBarrier(), CHECK_SPILL_ALL, impCurStmtDI);
- impAppendTree(gtNewHelperCallNode(helper, TYP_VOID, op1, op2, op3), CHECK_SPILL_ALL,
- impCurStmtDI);
- op1 = gtNewMemoryBarrier(true);
+ // value is zero -> memzero, otherwise -> memset
+ if (op2->IsIntegralConst(0))
+ {
+ call = gtNewHelperCallNode(CORINFO_HELP_MEMZERO, TYP_VOID, op1, op3);
+ }
+ else
+ {
+ call = gtNewHelperCallNode(CORINFO_HELP_MEMSET, TYP_VOID, op1, op2, op3);
+ }
}
else
{
- op1 = gtNewHelperCallNode(helper, TYP_VOID, op1, op2, op3);
+ call = gtNewHelperCallNode(CORINFO_HELP_MEMCPY, TYP_VOID, op1, op2, op3);
}
-#else
- if (opcode == CEE_INITBLK)
+
+ if (isVolatile)
{
- if (!op2->IsIntegralConst(0))
- {
- op2 = gtNewOperNode(GT_INIT_VAL, TYP_INT, op2);
- }
+ // Wrap with memory barriers: full-barrier + call + load-barrier
+ impAppendTree(gtNewMemoryBarrier(), CHECK_SPILL_ALL, impCurStmtDI);
+ impAppendTree(call, CHECK_SPILL_ALL, impCurStmtDI);
+ op1 = gtNewMemoryBarrier(true);
}
else
{
- op2 = gtNewIndir(TYP_STRUCT, op2);
+ op1 = call;
}
- op1 = gtNewStoreDynBlkNode(op1, op2, op3, indirFlags);
-#endif
}
goto SPILL_APPEND;
}
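What the importer now emits for initblk/cpblk can be summarized by this standalone sketch of the helper semantics (illustrative stand-ins; CORINFO_HELP_MEMZERO/MEMSET/MEMCPY are the helpers actually selected above, and the volatile case brackets the call with a full barrier before and a load barrier after):

    #include <atomic>
    #include <cstring>

    // initblk dst, val, size (a zero value selects the memzero helper)
    static void InitBlk(void* dst, unsigned char val, size_t size, bool isVolatile)
    {
        if (isVolatile)
            std::atomic_thread_fence(std::memory_order_seq_cst); // full barrier
        std::memset(dst, val, size); // MEMZERO when val == 0, MEMSET otherwise
        if (isVolatile)
            std::atomic_thread_fence(std::memory_order_acquire); // load barrier
    }

    // cpblk dst, src, size always maps to the memcpy helper
    static void CpBlk(void* dst, const void* src, size_t size, bool isVolatile)
    {
        if (isVolatile)
            std::atomic_thread_fence(std::memory_order_seq_cst);
        std::memcpy(dst, src, size);
        if (isVolatile)
            std::atomic_thread_fence(std::memory_order_acquire);
    }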
diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp
index b557c629e355..4fb75b7d3980 100644
--- a/src/coreclr/jit/importercalls.cpp
+++ b/src/coreclr/jit/importercalls.cpp
@@ -98,6 +98,10 @@ var_types Compiler::impImportCall(OPCODE opcode,
CORINFO_SIG_INFO calliSig;
NewCallArg extraArg;
+ // Swift calls that might throw use a SwiftError* arg that requires additional IR to handle,
+ // so if we're importing a Swift call, look for this type in the signature
+ CallArg* swiftErrorArg = nullptr;
+
/*-------------------------------------------------------------------------
* First create the call node
*/
@@ -651,6 +655,8 @@ var_types Compiler::impImportCall(OPCODE opcode,
if (call->gtFlags & GTF_CALL_UNMANAGED)
{
+ assert(call->IsCall());
+
// We set up the unmanaged call by linking the frame, disabling GC, etc
// This needs to be cleaned up on return.
// In addition, native calls have different normalization rules than managed code
@@ -663,7 +669,7 @@ var_types Compiler::impImportCall(OPCODE opcode,
checkForSmallType = true;
- impPopArgsForUnmanagedCall(call->AsCall(), sig);
+ impPopArgsForUnmanagedCall(call->AsCall(), sig, &swiftErrorArg);
goto DONE;
}
@@ -1290,7 +1296,7 @@ DONE_CALL:
impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtDI);
}
else if (JitConfig.JitProfileValues() && call->IsCall() &&
- call->AsCall()->IsSpecialIntrinsic(this, NI_System_Buffer_Memmove))
+ call->AsCall()->IsSpecialIntrinsic(this, NI_System_SpanHelpers_Memmove))
{
if (opts.IsOptimizedWithProfile())
{
@@ -1485,6 +1491,15 @@ DONE_CALL:
impPushOnStack(call, tiRetVal);
}
+#ifdef SWIFT_SUPPORT
+ // If call is a Swift call with error handling, append additional IR
+ // to handle storing the error register's value post-call.
+ if (swiftErrorArg != nullptr)
+ {
+ impAppendSwiftErrorStore(call->AsCall(), swiftErrorArg);
+ }
+#endif // SWIFT_SUPPORT
+
return callRetTyp;
}
#ifdef _PREFAST_
@@ -1555,7 +1570,7 @@ GenTree* Compiler::impDuplicateWithProfiledArg(GenTreeCall* call, IL_OFFSET ilOf
unsigned argNum = 0;
ssize_t minValue = 0;
ssize_t maxValue = 0;
- if (call->IsSpecialIntrinsic(this, NI_System_Buffer_Memmove))
+ if (call->IsSpecialIntrinsic(this, NI_System_SpanHelpers_Memmove))
{
// dst(0), src(1), len(2)
argNum = 2;
@@ -1822,7 +1837,9 @@ GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugI
/*****************************************************************************/
-void Compiler::impPopArgsForUnmanagedCall(GenTreeCall* call, CORINFO_SIG_INFO* sig)
+void Compiler::impPopArgsForUnmanagedCall(GenTreeCall* call,
+ CORINFO_SIG_INFO* sig,
+ /* OUT */ CallArg** swiftErrorArg)
{
assert(call->gtFlags & GTF_CALL_UNMANAGED);
@@ -1842,10 +1859,91 @@ void Compiler::impPopArgsForUnmanagedCall(GenTreeCall* call, CORINFO_SIG_INFO* s
if (call->unmgdCallConv == CorInfoCallConvExtension::Thiscall)
{
- assert(argsToReverse);
+ assert(argsToReverse != 0);
argsToReverse--;
}
+#ifdef SWIFT_SUPPORT
+ unsigned short swiftErrorIndex = sig->numArgs;
+ unsigned short swiftSelfIndex = sig->numArgs;
+
+ // We are importing an unmanaged Swift call, which might require special parameter handling
+ if (call->unmgdCallConv == CorInfoCallConvExtension::Swift)
+ {
+ bool checkEntireStack = false;
+
+ // Check the signature of the Swift call for the special types
+ CORINFO_ARG_LIST_HANDLE sigArg = sig->args;
+
+ for (unsigned short argIndex = 0; argIndex < sig->numArgs;
+ sigArg = info.compCompHnd->getArgNext(sigArg), argIndex++)
+ {
+ CORINFO_CLASS_HANDLE argClass;
+ CorInfoType argType = strip(info.compCompHnd->getArgType(sig, sigArg, &argClass));
+ bool argIsByrefOrPtr = false;
+
+ if ((argType == CORINFO_TYPE_BYREF) || (argType == CORINFO_TYPE_PTR))
+ {
+ argClass = info.compCompHnd->getArgClass(sig, sigArg);
+ argType = info.compCompHnd->getChildType(argClass, &argClass);
+ argIsByrefOrPtr = true;
+ }
+
+ if ((argType != CORINFO_TYPE_VALUECLASS) || !info.compCompHnd->isIntrinsicType(argClass))
+ {
+ continue;
+ }
+
+ const char* namespaceName;
+ const char* className = info.compCompHnd->getClassNameFromMetadata(argClass, &namespaceName);
+
+ if ((strcmp(className, "SwiftError") == 0) &&
+ (strcmp(namespaceName, "System.Runtime.InteropServices.Swift") == 0))
+ {
+ // For error handling purposes, we expect a pointer/reference to a SwiftError to be passed
+ if (!argIsByrefOrPtr)
+ {
+ BADCODE("Expected SwiftError pointer/reference, got struct");
+ }
+
+ if (swiftErrorIndex != sig->numArgs)
+ {
+ BADCODE("Duplicate SwiftError* parameter");
+ }
+
+ swiftErrorIndex = argIndex;
+ checkEntireStack = true;
+ }
+ else if ((strcmp(className, "SwiftSelf") == 0) &&
+ (strcmp(namespaceName, "System.Runtime.InteropServices.Swift") == 0))
+ {
+ // We expect a SwiftSelf struct to be passed, not a pointer/reference
+ if (argIsByrefOrPtr)
+ {
+ BADCODE("Expected SwiftSelf struct, got pointer/reference");
+ }
+
+ if (swiftSelfIndex != sig->numArgs)
+ {
+ BADCODE("Duplicate SwiftSelf parameter");
+ }
+
+ swiftSelfIndex = argIndex;
+ }
+ // TODO: Handle SwiftAsync
+ }
+
+ // Don't need to reverse args for Swift calls
+ argsToReverse = 0;
+
+ // If using SwiftError*, check entire stack for side effects
+ if (checkEntireStack)
+ {
+ impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("Spill for swift calls"));
+ }
+ }
+#endif // SWIFT_SUPPORT
+
#ifndef TARGET_X86
// Don't reverse args on ARM or x64 - first four args always placed in regs in order
argsToReverse = 0;
@@ -1892,6 +1990,7 @@ void Compiler::impPopArgsForUnmanagedCall(GenTreeCall* call, CORINFO_SIG_INFO* s
assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
}
+ unsigned short argIndex = 0;
for (CallArg& arg : call->gtArgs.Args())
{
GenTree* argNode = arg.GetEarlyNode();
@@ -1914,9 +2013,60 @@ void Compiler::impPopArgsForUnmanagedCall(GenTreeCall* call, CORINFO_SIG_INFO* s
assert(!"*** invalid IL: gc ref passed to unmanaged call");
}
}
+
+#ifdef SWIFT_SUPPORT
+ if (argIndex == swiftErrorIndex)
+ {
+ // Found the SwiftError* arg
+ assert(swiftErrorArg != nullptr);
+ *swiftErrorArg = &arg;
+ }
+ else if (argIndex == swiftSelfIndex)
+ {
+ // Found the SwiftSelf arg
+ arg.SetWellKnownArg(WellKnownArg::SwiftSelf);
+ }
+// TODO: SwiftAsync
+#endif // SWIFT_SUPPORT
+
+ argIndex++;
}
}
+#ifdef SWIFT_SUPPORT
+//------------------------------------------------------------------------
+// impAppendSwiftErrorStore: Append IR to store the Swift error register value
+// to the SwiftError* argument specified by swiftErrorArg, post-Swift call
+//
+// Arguments:
+// call - the Swift call
+// swiftErrorArg - the SwiftError* argument passed to call
+//
+void Compiler::impAppendSwiftErrorStore(GenTreeCall* call, CallArg* const swiftErrorArg)
+{
+ assert(call != nullptr);
+ assert(call->unmgdCallConv == CorInfoCallConvExtension::Swift);
+ assert(swiftErrorArg != nullptr);
+
+ GenTree* const argNode = swiftErrorArg->GetNode();
+ assert(argNode != nullptr);
+
+ // Store the error register value to where the SwiftError* points to
+ GenTree* errorRegNode = new (this, GT_SWIFT_ERROR) GenTree(GT_SWIFT_ERROR, TYP_I_IMPL);
+ errorRegNode->SetHasOrderingSideEffect();
+ errorRegNode->gtFlags |= (GTF_CALL | GTF_GLOB_REF);
+
+ GenTreeStoreInd* swiftErrorStore = gtNewStoreIndNode(argNode->TypeGet(), argNode, errorRegNode);
+ impAppendTree(swiftErrorStore, CHECK_SPILL_ALL, impCurStmtDI, false);
+
+ // Indicate the error register will be checked after this call returns
+ call->gtCallMoreFlags |= GTF_CALL_M_SWIFT_ERROR_HANDLING;
+
+ // Swift call isn't going to use the SwiftError* arg, so don't bother emitting it
+ call->gtArgs.Remove(swiftErrorArg);
+}
+#endif // SWIFT_SUPPORT
+
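Combined with the signature scan in impPopArgsForUnmanagedCall, the effect on a Swift call that takes an error out-parameter is roughly the following (an IR sketch in comment form; SWIFT_ERROR denotes the GT_SWIFT_ERROR node that reads the platform's error register):

    // managed signature: Foo(args, SwiftError* err)  // err located by the scan
    //
    // imported IR, in order:
    //   call Foo(args)             // err dropped from the arg list; the call is
    //                              // marked GTF_CALL_M_SWIFT_ERROR_HANDLING
    //   STOREIND(err, SWIFT_ERROR) // spill the error register through err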
//------------------------------------------------------------------------
// impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
// with a GT_COPYBLK node.
@@ -2761,7 +2911,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
betterToExpand = true;
break;
- case NI_System_Buffer_Memmove:
+ case NI_System_SpanHelpers_Memmove:
case NI_System_SpanHelpers_SequenceEqual:
// We're going to instrument these
betterToExpand = opts.IsInstrumented();
@@ -3982,7 +4132,8 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
case NI_System_Text_UTF8Encoding_UTF8EncodingSealed_ReadUtf8:
case NI_System_SpanHelpers_SequenceEqual:
- case NI_System_Buffer_Memmove:
+ case NI_System_SpanHelpers_ClearWithoutReferences:
+ case NI_System_SpanHelpers_Memmove:
{
if (sig->sigInst.methInstCount == 0)
{
@@ -3993,6 +4144,16 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
break;
}
+ case NI_System_SpanHelpers_Fill:
+ {
+ if (sig->sigInst.methInstCount == 1)
+ {
+ // We'll try to unroll this in lower for constant input.
+ isSpecial = true;
+ }
+ break;
+ }
+
case NI_System_BitConverter_DoubleToInt64Bits:
{
GenTree* op1 = impStackTop().val;
@@ -5618,8 +5779,7 @@ void Compiler::impCheckForPInvokeCall(
// return here without inlining the native call.
if (unmanagedCallConv == CorInfoCallConvExtension::Managed ||
unmanagedCallConv == CorInfoCallConvExtension::Fastcall ||
- unmanagedCallConv == CorInfoCallConvExtension::FastcallMemberFunction ||
- unmanagedCallConv == CorInfoCallConvExtension::Swift)
+ unmanagedCallConv == CorInfoCallConvExtension::FastcallMemberFunction)
{
return;
}
@@ -8863,13 +9023,6 @@ NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
result = NI_System_BitConverter_Int64BitsToDouble;
}
}
- else if (strcmp(className, "Buffer") == 0)
- {
- if (strcmp(methodName, "Memmove") == 0)
- {
- result = NI_System_Buffer_Memmove;
- }
- }
break;
}
@@ -9021,6 +9174,18 @@ NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
{
result = NI_System_SpanHelpers_SequenceEqual;
}
+ else if (strcmp(methodName, "Fill") == 0)
+ {
+ result = NI_System_SpanHelpers_Fill;
+ }
+ else if (strcmp(methodName, "ClearWithoutReferences") == 0)
+ {
+ result = NI_System_SpanHelpers_ClearWithoutReferences;
+ }
+ else if (strcmp(methodName, "Memmove") == 0)
+ {
+ result = NI_System_SpanHelpers_Memmove;
+ }
}
else if (strcmp(className, "String") == 0)
{
diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp
index fbbe08d66fc3..b4d9448e6abb 100644
--- a/src/coreclr/jit/indirectcalltransformer.cpp
+++ b/src/coreclr/jit/indirectcalltransformer.cpp
@@ -207,6 +207,10 @@ private:
{
remainderBlock = compiler->fgSplitBlockAfterStatement(currBlock, stmt);
remainderBlock->SetFlags(BBF_INTERNAL);
+
+ // We will be adding more blocks after currBlock, so remove edge to remainderBlock.
+ //
+ compiler->fgRemoveRefPred(currBlock->GetTargetEdge());
}
virtual void CreateCheck(uint8_t checkIdx) = 0;
@@ -218,13 +222,12 @@ private:
// Arguments:
// jumpKind - jump kind for the new basic block
// insertAfter - basic block, after which compiler has to insert the new one.
- // jumpDest - jump target for the new basic block. Defaults to nullptr.
//
// Return Value:
// new basic block.
- BasicBlock* CreateAndInsertBasicBlock(BBKinds jumpKind, BasicBlock* insertAfter, BasicBlock* jumpDest = nullptr)
+ BasicBlock* CreateAndInsertBasicBlock(BBKinds jumpKind, BasicBlock* insertAfter)
{
- BasicBlock* block = compiler->fgNewBBafter(jumpKind, insertAfter, true, jumpDest);
+ BasicBlock* block = compiler->fgNewBBafter(jumpKind, insertAfter, true);
block->SetFlags(BBF_IMPORTED);
return block;
}
@@ -267,37 +270,38 @@ private:
assert(compiler->fgPredsComputed);
// currBlock
- compiler->fgRemoveRefPred(remainderBlock, currBlock);
-
if (checkBlock != currBlock)
{
assert(currBlock->KindIs(BBJ_ALWAYS));
- currBlock->SetTarget(checkBlock);
FlowEdge* const newEdge = compiler->fgAddRefPred(checkBlock, currBlock);
newEdge->setLikelihood(1.0);
+ currBlock->SetTargetEdge(newEdge);
}
// checkBlock
// Todo: get likelihoods right
//
assert(checkBlock->KindIs(BBJ_ALWAYS));
- checkBlock->SetCond(elseBlock, thenBlock);
FlowEdge* const thenEdge = compiler->fgAddRefPred(thenBlock, checkBlock);
thenEdge->setLikelihood(0.5);
FlowEdge* const elseEdge = compiler->fgAddRefPred(elseBlock, checkBlock);
elseEdge->setLikelihood(0.5);
+ checkBlock->SetCond(elseEdge, thenEdge);
// thenBlock
- assert(thenBlock->TargetIs(remainderBlock));
{
+ assert(thenBlock->KindIs(BBJ_ALWAYS));
FlowEdge* const newEdge = compiler->fgAddRefPred(remainderBlock, thenBlock);
newEdge->setLikelihood(1.0);
+ thenBlock->SetTargetEdge(newEdge);
}
// elseBlock
{
+ assert(elseBlock->KindIs(BBJ_ALWAYS));
FlowEdge* const newEdge = compiler->fgAddRefPred(remainderBlock, elseBlock);
newEdge->setLikelihood(1.0);
+ elseBlock->SetTargetEdge(newEdge);
}
}
@@ -376,7 +380,7 @@ private:
{
assert(checkIdx == 0);
- checkBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, currBlock, currBlock->Next());
+ checkBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, currBlock);
checkBlock->SetFlags(BBF_NONE_QUIRK);
GenTree* fatPointerMask = new (compiler, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, FAT_POINTER_MASK);
GenTree* fptrAddressCopy = compiler->gtCloneExpr(fptrAddress);
@@ -395,7 +399,7 @@ private:
virtual void CreateThen(uint8_t checkIdx)
{
assert(remainderBlock != nullptr);
- thenBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, checkBlock, remainderBlock);
+ thenBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, checkBlock);
Statement* copyOfOriginalStmt = compiler->gtCloneStmt(stmt);
compiler->fgInsertStmtAtEnd(thenBlock, copyOfOriginalStmt);
}
@@ -405,7 +409,7 @@ private:
//
virtual void CreateElse()
{
- elseBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, thenBlock, thenBlock->Next());
+ elseBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, thenBlock);
elseBlock->SetFlags(BBF_NONE_QUIRK);
GenTree* fixedFptrAddress = GetFixedFptrAddress();
@@ -567,10 +571,7 @@ private:
{
assert(compiler->fgPredsComputed);
- // currBlock
- compiler->fgRemoveRefPred(remainderBlock, currBlock);
-
- // The rest of chaining is done in-place.
+ // Chaining is done in-place.
}
virtual void SetWeights()
@@ -601,23 +602,25 @@ private:
checkBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, thenBlock);
checkFallsThrough = false;
- // Calculate the total likelihood for this check as a sum of likelihoods
- // of all previous candidates (thenBlocks)
- unsigned checkLikelihood = 100;
- for (uint8_t previousCandidate = 0; previousCandidate < checkIdx; previousCandidate++)
- {
- checkLikelihood -= origCall->GetGDVCandidateInfo(previousCandidate)->likelihood;
- }
+ // We computed the "then" likelihood in CreateThen, so we
+ // just use that to figure out the "else" likelihood.
+ //
+ assert(prevCheckBlock->KindIs(BBJ_ALWAYS));
+ assert(prevCheckBlock->JumpsToNext());
+ FlowEdge* const prevCheckThenEdge = prevCheckBlock->GetTargetEdge();
+ assert(prevCheckThenEdge->hasLikelihood());
+ weight_t checkLikelihood = max(0.0, 1.0 - prevCheckThenEdge->getLikelihood());
- // Make sure we didn't overflow
- assert(checkLikelihood <= 100);
- weight_t checkLikelihoodWt = ((weight_t)checkLikelihood) / 100.0;
+ JITDUMP("Level %u Check block " FMT_BB " success likelihood " FMT_WT "\n", checkIdx, checkBlock->bbNum,
+ checkLikelihood);
// prevCheckBlock is expected to jump to this new check (if its type check doesn't succeed)
- prevCheckBlock->SetCond(checkBlock, prevCheckBlock->Next());
- FlowEdge* const checkEdge = compiler->fgAddRefPred(checkBlock, prevCheckBlock);
- checkEdge->setLikelihood(checkLikelihoodWt);
- checkBlock->inheritWeightPercentage(currBlock, checkLikelihood);
+ //
+ FlowEdge* const prevCheckCheckEdge = compiler->fgAddRefPred(checkBlock, prevCheckBlock);
+ prevCheckCheckEdge->setLikelihood(checkLikelihood);
+ checkBlock->inheritWeight(prevCheckBlock);
+ checkBlock->scaleBBWeight(checkLikelihood);
+ prevCheckBlock->SetCond(prevCheckCheckEdge, prevCheckThenEdge);
}
// Find last arg with a side effect. All args with any effect
@@ -1016,25 +1019,59 @@ private:
{
// Compute likelihoods
//
- unsigned const thenLikelihood = origCall->GetGDVCandidateInfo(checkIdx)->likelihood;
- weight_t thenLikelihoodWt = min(((weight_t)thenLikelihood) / 100.0, 100.0);
- weight_t elseLikelihoodWt = max(1.0 - thenLikelihoodWt, 0.0);
+ // If this is the first check the likelihood is just the candidate likelihood.
+ // If there are multiple checks things are a bit more complicated.
+ //
+ // Say we had three candidates with likelihoods 0.5, 0.3, and 0.1.
+ //
+ // The first one's likelihood is 0.5.
+ //
+ // The second one (given that we've already checked the first and failed)
+ // is (0.3) / (1.0 - 0.5) = 0.6.
+ //
+ // The third one is (0.1) / (1.0 - (0.5 + 0.3)) = (0.1)/(0.2) = 0.5
+ //
+ // So to figure out the proper divisor, we start with 1.0 and subtract off each
+ // preceding test's likelihood of success.
+ //
+ unsigned const thenLikelihood = origCall->GetGDVCandidateInfo(checkIdx)->likelihood;
+ unsigned baseLikelihood = 0;
+
+ for (uint8_t i = 0; i < checkIdx; i++)
+ {
+ baseLikelihood += origCall->GetGDVCandidateInfo(i)->likelihood;
+ }
+ assert(baseLikelihood < 100);
+ baseLikelihood = 100 - baseLikelihood;
+
+ weight_t adjustedThenLikelihood = min(((weight_t)thenLikelihood) / baseLikelihood, 100.0);
+ JITDUMP("For check in " FMT_BB ": orig likelihood " FMT_WT ", base likelihood " FMT_WT
+ ", adjusted likelihood " FMT_WT "\n",
+ checkBlock->bbNum, (weight_t)thenLikelihood / 100.0, (weight_t)baseLikelihood / 100.0,
+ adjustedThenLikelihood);
// thenBlock always jumps to remainderBlock
- thenBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, checkBlock, remainderBlock);
+ //
+ thenBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, checkBlock);
thenBlock->CopyFlags(currBlock, BBF_SPLIT_GAINED);
- thenBlock->inheritWeightPercentage(currBlock, thenLikelihood);
+ thenBlock->inheritWeight(currBlock);
+ thenBlock->scaleBBWeight(adjustedThenLikelihood);
+ FlowEdge* const thenRemainderEdge = compiler->fgAddRefPred(remainderBlock, thenBlock);
+ thenBlock->SetTargetEdge(thenRemainderEdge);
+ thenRemainderEdge->setLikelihood(1.0);
- // Also, thenBlock has a single pred - last checkBlock
+ // thenBlock has a single pred - last checkBlock.
+ //
assert(checkBlock->KindIs(BBJ_ALWAYS));
- checkBlock->SetTarget(thenBlock);
+ FlowEdge* const checkThenEdge = compiler->fgAddRefPred(thenBlock, checkBlock);
+ checkThenEdge->setLikelihood(adjustedThenLikelihood);
+ checkBlock->SetTargetEdge(checkThenEdge);
checkBlock->SetFlags(BBF_NONE_QUIRK);
assert(checkBlock->JumpsToNext());
- FlowEdge* const thenEdge = compiler->fgAddRefPred(thenBlock, checkBlock);
- thenEdge->setLikelihood(thenLikelihoodWt);
- FlowEdge* const elseEdge = compiler->fgAddRefPred(remainderBlock, thenBlock);
- elseEdge->setLikelihood(elseLikelihoodWt);
+ // We will set the "else edge" likelihood in CreateElse later,
+ // based on the thenEdge likelihood.
+ //
DevirtualizeCall(thenBlock, checkIdx);
}
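The divisor logic in the comment above is easy to sanity-check with a few lines of standalone C++ (likelihoods as integer percentages, matching how GetGDVCandidateInfo stores them; this is an illustration, not JIT code):

    #include <cstdio>

    // Adjusted likelihood of check N, given that checks 0..N-1 already failed:
    // candidate% / (100 - sum of preceding candidates%).
    static double AdjustedLikelihood(const unsigned* candidates, unsigned checkIdx)
    {
        unsigned base = 0;
        for (unsigned i = 0; i < checkIdx; i++)
            base += candidates[i];
        return (double)candidates[checkIdx] / (double)(100 - base);
    }

    int main()
    {
        const unsigned likelihoods[] = {50, 30, 10};
        for (unsigned i = 0; i < 3; i++)
            std::printf("check %u: %.2f\n", i, AdjustedLikelihood(likelihoods, i));
        return 0; // prints 0.50, 0.60, 0.50, matching the worked example above
    }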
@@ -1043,28 +1080,28 @@ private:
//
virtual void CreateElse()
{
- // Calculate the likelihood of the else block as a remainder of the sum
- // of all the other likelihoods.
- unsigned elseLikelihood = 100;
- for (uint8_t i = 0; i < origCall->GetInlineCandidatesCount(); i++)
- {
- elseLikelihood -= origCall->GetGDVCandidateInfo(i)->likelihood;
- }
- // Make sure it didn't overflow
- assert(elseLikelihood <= 100);
- weight_t elseLikelihoodDbl = ((weight_t)elseLikelihood) / 100.0;
-
- elseBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, thenBlock, thenBlock->Next());
+ elseBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, thenBlock);
elseBlock->CopyFlags(currBlock, BBF_SPLIT_GAINED);
elseBlock->SetFlags(BBF_NONE_QUIRK);
+ // We computed the "then" likelihood in CreateThen, so we
+ // just use that to figure out the "else" likelihood.
+ //
+ assert(checkBlock->KindIs(BBJ_ALWAYS));
+ FlowEdge* const checkThenEdge = checkBlock->GetTargetEdge();
+ assert(checkThenEdge->hasLikelihood());
+ weight_t elseLikelihood = max(0.0, 1.0 - checkThenEdge->getLikelihood());
+
// CheckBlock flows into elseBlock unless we deal with the case
// where we know the last check is always true (in case of "exact" GDV)
+ //
if (!checkFallsThrough)
{
- checkBlock->SetCond(elseBlock, checkBlock->Next());
- FlowEdge* const checkEdge = compiler->fgAddRefPred(elseBlock, checkBlock);
- checkEdge->setLikelihood(elseLikelihoodDbl);
+ assert(checkBlock->KindIs(BBJ_ALWAYS));
+ assert(checkBlock->JumpsToNext());
+ FlowEdge* const checkElseEdge = compiler->fgAddRefPred(elseBlock, checkBlock);
+ checkElseEdge->setLikelihood(elseLikelihood);
+ checkBlock->SetCond(checkElseEdge, checkThenEdge);
}
else
{
@@ -1072,16 +1109,23 @@ private:
// and is NativeAOT-only, we just assume the unreached block will be removed
// by other phases.
assert(origCall->gtCallMoreFlags & GTF_CALL_M_GUARDED_DEVIRT_EXACT);
+
+ // Since we're not converting the check block to a BBJ_COND, update the
+ // then edge likelihood (it should already have the right value, so perhaps assert instead?)
+ //
+ checkThenEdge->setLikelihood(1.0);
}
// elseBlock always flows into remainderBlock
- FlowEdge* const elseEdge = compiler->fgAddRefPred(remainderBlock, elseBlock);
- elseEdge->setLikelihood(1.0);
+ FlowEdge* const elseRemainderEdge = compiler->fgAddRefPred(remainderBlock, elseBlock);
+ elseRemainderEdge->setLikelihood(1.0);
+ elseBlock->SetTargetEdge(elseRemainderEdge);
// Remove everything related to inlining from the original call
origCall->ClearInlineInfo();
- elseBlock->inheritWeightPercentage(currBlock, elseLikelihood);
+ elseBlock->inheritWeight(checkBlock);
+ elseBlock->scaleBBWeight(elseLikelihood);
GenTreeCall* call = origCall;
Statement* newStmt = compiler->gtNewStmt(call, stmt->GetDebugInfo());
@@ -1176,12 +1220,12 @@ private:
// Finally, rewire the cold block to jump to the else block,
// not fall through to the check block.
//
- FlowEdge* const oldEdge = compiler->fgRemoveRefPred(checkBlock, coldBlock);
- coldBlock->SetKindAndTarget(BBJ_ALWAYS, elseBlock);
- compiler->fgAddRefPred(elseBlock, coldBlock, oldEdge);
+ compiler->fgRemoveRefPred(coldBlock->GetTargetEdge());
+ FlowEdge* const newEdge = compiler->fgAddRefPred(elseBlock, coldBlock, coldBlock->GetTargetEdge());
+ coldBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
}
- // When the current candidate hads sufficiently high likelihood, scan
+ // When the current candidate has sufficiently high likelihood, scan
// the remainder block looking for another GDV candidate.
//
// (also consider: if currBlock has sufficiently high execution frequency)
diff --git a/src/coreclr/jit/inductionvariableopts.cpp b/src/coreclr/jit/inductionvariableopts.cpp
new file mode 100644
index 000000000000..59e5b6a0d497
--- /dev/null
+++ b/src/coreclr/jit/inductionvariableopts.cpp
@@ -0,0 +1,686 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+// This file contains code to optimize induction variables in loops based on
+// scalar evolution analysis (see scev.h and scev.cpp for more information
+// about the scalar evolution analysis).
+//
+// Currently the only optimization done is widening of primary induction
+// variables from 32 bits into 64 bits. This is generally only profitable on
+// x64, which does not allow zero extension of 32-bit values in addressing
+// modes (in contrast, arm64 can include zero extensions in addressing modes).
+// For x64 this saves a zero extension for every array
+// access inside the loop, in exchange for some widening or narrowing stores
+// outside the loop:
+// - To make sure the new widened IV starts at the right value it is
+// initialized to the value of the narrow IV outside the loop (either in the
+// preheader or at the def location of the narrow IV). Usually the start
+// value is a constant, in which case the widened IV is just initialized to
+// the constant value.
+// - If the narrow IV is used after the loop we need to store it back from
+// the widened IV in the exits. We depend on liveness sets to figure out
+// which exits to insert IR into.
+//
+// These steps ensure that the wide IV has the right value to begin with and
+// the old narrow IV still has the right value after the loop. Additionally,
+// we must replace every use of the narrow IV inside the loop with the widened
+// IV. This is done by a traversal of the IR inside the loop. We do not
+// actually widen the uses of the IV; rather, we keep all uses and defs as
+// 32-bit, which the backend is able to handle efficiently on x64. Because of
+// this we do not need to worry about overflow.
+//
+
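In source-level terms the transformation sketched in this header comment looks roughly as follows (a hedged C++ analogy; the phase itself rewrites IR, and the sunk store is only emitted in exits where the narrow IV is live):

    // Before: on x64 every a[i] address computation zero-extends i.
    int SumNarrow(const int* a, int n)
    {
        int sum = 0;
        for (int i = 0; i < n; i++)
            sum += a[i];
        return sum;
    }

    // After: the IV itself is 64-bit; uses and defs of the old 32-bit view
    // stay 32-bit, and the narrow local is written back on exit if needed.
    int SumWide(const int* a, int n)
    {
        int sum = 0;
        long long w = 0;        // widened IV, starts at the narrow start value
        for (; (int)w < n; w++)
            sum += a[w];        // no zero extension needed in the address mode
        // int i = (int)w;      // sink, only in exits where 'i' is live after
        return sum;
    }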
+#include "jitpch.h"
+#include "scev.h"
+
+//------------------------------------------------------------------------
+// optCanSinkWidenedIV: Check to see if we are able to sink a store to the old
+// local into the exits of a loop if we decide to widen.
+//
+// Parameters:
+// lclNum - The primary induction variable
+// loop - The loop
+//
+// Returns:
+// True if we can sink a store to the old local after widening.
+//
+// Remarks:
+// This handles the situation where the primary induction variable is used
+// after the loop. In those cases we need to store the widened local back
+// into the old one in the exits where the IV variable is live.
+//
+// We are able to sink when none of the exits are critical blocks, in the
+// sense that all their predecessors must come from inside the loop. Loop
+// exit canonicalization guarantees this for regular exit blocks. It is not
+// guaranteed for exceptional exits, but we do not expect to widen IVs that
+// are live into exceptional exits, since those are marked DNER, which makes
+// widening unprofitable anyway.
+//
+// Note that there may be natural loops that have not had their regular exits
+// canonicalized at the time when IV opts run, in particular if RBO/assertion
+// prop makes a previously unnatural loop natural. This function accounts for
+// and rejects these cases.
+//
+bool Compiler::optCanSinkWidenedIV(unsigned lclNum, FlowGraphNaturalLoop* loop)
+{
+ LclVarDsc* dsc = lvaGetDesc(lclNum);
+
+ BasicBlockVisit result = loop->VisitRegularExitBlocks([=](BasicBlock* exit) {
+
+ if (!VarSetOps::IsMember(this, exit->bbLiveIn, dsc->lvVarIndex))
+ {
+ JITDUMP(" Exit " FMT_BB " does not need a sink; V%02u is not live-in\n", exit->bbNum, lclNum);
+ return BasicBlockVisit::Continue;
+ }
+
+ for (BasicBlock* pred : exit->PredBlocks())
+ {
+ if (!loop->ContainsBlock(pred))
+ {
+ JITDUMP(" Cannot safely sink widened version of V%02u into exit " FMT_BB " of " FMT_LP
+ "; it has a non-loop pred " FMT_BB "\n",
+ lclNum, exit->bbNum, loop->GetIndex(), pred->bbNum);
+ return BasicBlockVisit::Abort;
+ }
+ }
+
+ return BasicBlockVisit::Continue;
+ });
+
+#ifdef DEBUG
+ // We currently do not expect to ever widen IVs that are live into
+ // exceptional exits. Such IVs are expected to have been marked DNER
+ // previously (EH write-thru is only for single def locals) which makes it
+ // unprofitable. If this ever changes we need some more expansive handling
+ // here.
+ loop->VisitLoopBlocks([=](BasicBlock* block) {
+
+ block->VisitAllSuccs(this, [=](BasicBlock* succ) {
+ if (!loop->ContainsBlock(succ) && bbIsHandlerBeg(succ))
+ {
+ assert(!VarSetOps::IsMember(this, succ->bbLiveIn, dsc->lvVarIndex) &&
+ "Candidate IV for widening is live into exceptional exit");
+ }
+
+ return BasicBlockVisit::Continue;
+ });
+
+ return BasicBlockVisit::Continue;
+ });
+#endif
+
+ return result != BasicBlockVisit::Abort;
+}
+
+//------------------------------------------------------------------------
+// optIsIVWideningProfitable: Check to see if IV widening is profitable.
+//
+// Parameters:
+// lclNum - The primary induction variable
+// initBlock        - The block in which the new IV would be initialized
+// initedToConstant - Whether or not the new IV will be initialized to a constant
+// loop - The loop
+// ivUses - Statements in which "lclNum" appears will be added to this list
+//
+// Returns:
+// True if IV widening is profitable.
+//
+// Remarks:
+// IV widening is generally profitable when it allows us to remove casts
+// inside the loop. However, it may also introduce other reg-reg moves:
+// 1. We may need to store the narrow IV into the wide one in the
+// preheader. This is necessary when the start value is not constant. If
+// the start value _is_ constant then we assume that the constant store to
+// the narrow local will be DCE'd.
+// 2. We need to store the wide IV back into the narrow one in each of
+// the exits where the narrow IV is live-in.
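+//
+// As a worked example with invented numbers: suppose a loop block of
+// weight 100 contains two removable zero extensions. With ExtensionCost=2
+// and ExtensionSize=3 below, that saves an estimated 2*100*2 = 400 cycles
+// and 2*3 = 6 bytes. A single live-in exit of weight 1 then costs back 2
+// cycles and 3 bytes, leaving ~398 cycles and 3 bytes saved per invocation
+// (with an entry weight of 1), which passes the profitability checks.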
+//
+bool Compiler::optIsIVWideningProfitable(unsigned lclNum,
+ BasicBlock* initBlock,
+ bool initedToConstant,
+ FlowGraphNaturalLoop* loop,
+ ArrayStack<Statement*>& ivUses)
+{
+ for (FlowGraphNaturalLoop* otherLoop : m_loops->InReversePostOrder())
+ {
+ if (otherLoop == loop)
+ continue;
+
+ for (Statement* stmt : otherLoop->GetHeader()->Statements())
+ {
+ if (!stmt->IsPhiDefnStmt())
+ break;
+
+ if (stmt->GetRootNode()->AsLclVarCommon()->GetLclNum() == lclNum)
+ {
+ JITDUMP(" V%02u has a phi [%06u] in " FMT_LP "'s header " FMT_BB "\n", lclNum,
+ dspTreeID(stmt->GetRootNode()), otherLoop->GetIndex(), otherLoop->GetHeader()->bbNum);
+ // TODO-CQ: We can legally widen these cases, but LSRA is
+ // unhappy about some of the lifetimes we create when we do
+ // this. This particularly affects cloned loops.
+ return false;
+ }
+ }
+ }
+
+ const weight_t ExtensionCost = 2;
+ const int ExtensionSize = 3;
+
+ weight_t savedCost = 0;
+ int savedSize = 0;
+
+ loop->VisitLoopBlocks([&](BasicBlock* block) {
+ for (Statement* stmt : block->NonPhiStatements())
+ {
+ bool hasUse = false;
+ int numExtensions = 0;
+ for (GenTree* node : stmt->TreeList())
+ {
+ if (!node->OperIs(GT_CAST))
+ {
+ hasUse |= node->OperIsLocal() && (node->AsLclVarCommon()->GetLclNum() == lclNum);
+ continue;
+ }
+
+ GenTreeCast* cast = node->AsCast();
+ if ((cast->gtCastType != TYP_LONG) || !cast->IsUnsigned() || cast->gtOverflow())
+ {
+ continue;
+ }
+
+ GenTree* op = cast->CastOp();
+ if (!op->OperIs(GT_LCL_VAR) || (op->AsLclVarCommon()->GetLclNum() != lclNum))
+ {
+ continue;
+ }
+
+ // If this is already the source of a store then it is going to be
+ // free in our backends regardless.
+ GenTree* parent = node->gtGetParent(nullptr);
+ if ((parent != nullptr) && parent->OperIs(GT_STORE_LCL_VAR))
+ {
+ continue;
+ }
+
+ numExtensions++;
+ }
+
+ if (hasUse)
+ {
+ ivUses.Push(stmt);
+ }
+
+ if (numExtensions > 0)
+ {
+ JITDUMP(" Found %d zero extensions in " FMT_STMT "\n", numExtensions, stmt->GetID());
+
+ savedSize += numExtensions * ExtensionSize;
+ savedCost += numExtensions * block->getBBWeight(this) * ExtensionCost;
+ }
+ }
+
+ return BasicBlockVisit::Continue;
+ });
+
+ if (!initedToConstant)
+ {
+ // We will need to store the narrow IV into the wide one in the init
+        // block. We only cost this when the init value is not a constant,
+        // since otherwise we assume that the constant initialization of the
+        // narrow local will be DCE'd.
+ savedSize -= ExtensionSize;
+ savedCost -= initBlock->getBBWeight(this) * ExtensionCost;
+ }
+
+ // Now account for the cost of sinks.
+ LclVarDsc* dsc = lvaGetDesc(lclNum);
+ loop->VisitRegularExitBlocks([&](BasicBlock* exit) {
+ if (VarSetOps::IsMember(this, exit->bbLiveIn, dsc->lvVarIndex))
+ {
+ savedSize -= ExtensionSize;
+ savedCost -= exit->getBBWeight(this) * ExtensionCost;
+ }
+ return BasicBlockVisit::Continue;
+ });
+
+ const weight_t ALLOWED_SIZE_REGRESSION_PER_CYCLE_IMPROVEMENT = 2;
+ weight_t cycleImprovementPerInvoc = savedCost / fgFirstBB->getBBWeight(this);
+
+ JITDUMP(" Estimated cycle improvement: " FMT_WT " cycles per invocation\n", cycleImprovementPerInvoc);
+ JITDUMP(" Estimated size improvement: %d bytes\n", savedSize);
+
+ if ((cycleImprovementPerInvoc > 0) &&
+ ((cycleImprovementPerInvoc * ALLOWED_SIZE_REGRESSION_PER_CYCLE_IMPROVEMENT) >= -savedSize))
+ {
+ JITDUMP(" Widening is profitable (cycle improvement)\n");
+ return true;
+ }
+
+ const weight_t ALLOWED_CYCLE_REGRESSION_PER_SIZE_IMPROVEMENT = 0.01;
+
+ if ((savedSize > 0) && ((savedSize * ALLOWED_CYCLE_REGRESSION_PER_SIZE_IMPROVEMENT) >= -cycleImprovementPerInvoc))
+ {
+ JITDUMP(" Widening is profitable (size improvement)\n");
+ return true;
+ }
+
+ JITDUMP(" Widening is not profitable\n");
+ return false;
+}
+
+//------------------------------------------------------------------------
+// optSinkWidenedIV: Create stores back to the narrow IV in the exits where
+// that is necessary.
+//
+// Parameters:
+// lclNum - Narrow version of primary induction variable
+// newLclNum - Wide version of primary induction variable
+// loop - The loop
+//
+void Compiler::optSinkWidenedIV(unsigned lclNum, unsigned newLclNum, FlowGraphNaturalLoop* loop)
+{
+ LclVarDsc* dsc = lvaGetDesc(lclNum);
+ loop->VisitRegularExitBlocks([=](BasicBlock* exit) {
+ if (!VarSetOps::IsMember(this, exit->bbLiveIn, dsc->lvVarIndex))
+ {
+ return BasicBlockVisit::Continue;
+ }
+
+ GenTree* narrowing = gtNewCastNode(TYP_INT, gtNewLclvNode(newLclNum, TYP_LONG), false, TYP_INT);
+ GenTree* store = gtNewStoreLclVarNode(lclNum, narrowing);
+ Statement* newStmt = fgNewStmtFromTree(store);
+ JITDUMP("Narrow IV local V%02u live into exit block " FMT_BB "; sinking a narrowing\n", lclNum, exit->bbNum);
+ DISPSTMT(newStmt);
+ fgInsertStmtAtBeg(exit, newStmt);
+
+ return BasicBlockVisit::Continue;
+ });
+}
+
+//------------------------------------------------------------------------
+// optReplaceWidenedIV: Replace uses of the narrow IV with the wide IV in the
+// specified statement.
+//
+// Parameters:
+//   lclNum    - Narrow version of primary induction variable
+//   ssaNum    - SSA number of the narrow IV to restrict replacement to, or
+//               SsaConfig::RESERVED_SSA_NUM to replace all uses
+//   newLclNum - Wide version of primary induction variable
+//   stmt      - The statement to replace uses in.
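+//
+// For example (with hypothetical local numbers), a use CAST(long <- uint V01)
+// inside "stmt" is replaced wholesale by LCL_VAR long V02, while a plain
+// 32-bit use LCL_VAR int V01 simply has its local number changed to V02,
+// relying on the backend's support for TYP_INT uses of TYP_LONG locals.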
+//
+void Compiler::optReplaceWidenedIV(unsigned lclNum, unsigned ssaNum, unsigned newLclNum, Statement* stmt)
+{
+ struct ReplaceVisitor : GenTreeVisitor<ReplaceVisitor>
+ {
+ private:
+ unsigned m_lclNum;
+ unsigned m_ssaNum;
+ unsigned m_newLclNum;
+
+ bool IsLocal(GenTreeLclVarCommon* tree)
+ {
+ return (tree->GetLclNum() == m_lclNum) &&
+ ((m_ssaNum == SsaConfig::RESERVED_SSA_NUM) || (tree->GetSsaNum() == m_ssaNum));
+ }
+
+ public:
+ bool MadeChanges = false;
+
+ enum
+ {
+ DoPreOrder = true,
+ };
+
+ ReplaceVisitor(Compiler* comp, unsigned lclNum, unsigned ssaNum, unsigned newLclNum)
+ : GenTreeVisitor(comp), m_lclNum(lclNum), m_ssaNum(ssaNum), m_newLclNum(newLclNum)
+ {
+ }
+
+ fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
+ {
+ GenTree* node = *use;
+ if (node->OperIs(GT_CAST))
+ {
+ GenTreeCast* cast = node->AsCast();
+ if ((cast->gtCastType == TYP_LONG) && cast->IsUnsigned() && !cast->gtOverflow())
+ {
+ GenTree* op = cast->CastOp();
+ if (op->OperIs(GT_LCL_VAR) && IsLocal(op->AsLclVarCommon()))
+ {
+ *use = m_compiler->gtNewLclvNode(m_newLclNum, TYP_LONG);
+ MadeChanges = true;
+ return fgWalkResult::WALK_SKIP_SUBTREES;
+ }
+ }
+ }
+ else if (node->OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR, GT_LCL_FLD, GT_STORE_LCL_FLD) &&
+ IsLocal(node->AsLclVarCommon()))
+ {
+ switch (node->OperGet())
+ {
+ case GT_LCL_VAR:
+ node->AsLclVarCommon()->SetLclNum(m_newLclNum);
+ // No cast needed -- the backend allows TYP_INT uses of TYP_LONG locals.
+ break;
+ case GT_STORE_LCL_VAR:
+ {
+ node->AsLclVarCommon()->SetLclNum(m_newLclNum);
+ node->gtType = TYP_LONG;
+ node->AsLclVarCommon()->Data() =
+ m_compiler->gtNewCastNode(TYP_LONG, node->AsLclVarCommon()->Data(), true, TYP_LONG);
+ break;
+ }
+ case GT_LCL_FLD:
+ case GT_STORE_LCL_FLD:
+ assert(!"Unexpected field use for local not marked as DNER");
+ break;
+ default:
+ break;
+ }
+
+ MadeChanges = true;
+ }
+
+ return fgWalkResult::WALK_CONTINUE;
+ }
+ };
+
+ ReplaceVisitor visitor(this, lclNum, ssaNum, newLclNum);
+ visitor.WalkTree(stmt->GetRootNodePointer(), nullptr);
+ if (visitor.MadeChanges)
+ {
+ gtSetStmtInfo(stmt);
+ fgSetStmtSeq(stmt);
+ JITDUMP("New tree:\n", dspTreeID(stmt->GetRootNode()));
+ DISPTREE(stmt->GetRootNode());
+ JITDUMP("\n");
+ }
+ else
+ {
+ JITDUMP("No replacements made\n");
+ }
+}
+
+//------------------------------------------------------------------------
+// optBestEffortReplaceNarrowIVUses: Try to find and replace uses of the specified
+// SSA def with a new local.
+//
+// Parameters:
+// lclNum - Previous local
+// ssaNum - Previous local SSA num
+// newLclNum - New local to replace with
+// block - Block to replace in
+// firstStmt - First statement in "block" to start replacing in
+//
+// Remarks:
+// This function is best effort; it might not find all uses of the provided
+// SSA num, particularly because it does not follow into joins. Note that we
+// only use this to replace uses of the narrow IV outside the loop; inside
+// the loop we do ensure that all uses/defs are replaced.
+// Keeping it best-effort outside the loop is ok; there is no correctness
+// issue since we do not invalidate the value of the old narrow IV in any
+// way, but it may mean we end up leaving the narrow IV live concurrently
+// with the new widened IV, increasing register pressure.
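+//
+//   As a hypothetical example: if the def block's successor S1 has the def
+//   block as its unique predecessor, we recurse into S1 and replace uses
+//   there; a successor S2 that is also reachable from elsewhere (a join) is
+//   skipped, so any uses of the narrow IV in S2 are left alone.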
+//
+void Compiler::optBestEffortReplaceNarrowIVUses(
+ unsigned lclNum, unsigned ssaNum, unsigned newLclNum, BasicBlock* block, Statement* firstStmt)
+{
+ JITDUMP("Replacing V%02u -> V%02u in " FMT_BB " starting at " FMT_STMT "\n", lclNum, newLclNum, block->bbNum,
+ firstStmt == nullptr ? 0 : firstStmt->GetID());
+
+ for (Statement* stmt = firstStmt; stmt != nullptr; stmt = stmt->GetNextStmt())
+ {
+ JITDUMP("Replacing V%02u -> V%02u in [%06u]\n", lclNum, newLclNum, dspTreeID(stmt->GetRootNode()));
+ DISPSTMT(stmt);
+ JITDUMP("\n");
+
+ optReplaceWidenedIV(lclNum, ssaNum, newLclNum, stmt);
+ }
+
+ block->VisitRegularSuccs(this, [=](BasicBlock* succ) {
+ if (succ->GetUniquePred(this) == block)
+ {
+ optBestEffortReplaceNarrowIVUses(lclNum, ssaNum, newLclNum, succ, succ->firstStmt());
+ }
+
+ return BasicBlockVisit::Continue;
+ });
+}
+
+//------------------------------------------------------------------------
+// optInductionVariables: Try to optimize induction variables in the method.
+//
+// Returns:
+// PhaseStatus indicating if anything changed.
+//
+PhaseStatus Compiler::optInductionVariables()
+{
+ JITDUMP("*************** In optInductionVariables()\n");
+
+#ifdef DEBUG
+ static ConfigMethodRange s_range;
+ s_range.EnsureInit(JitConfig.JitEnableInductionVariableOptsRange());
+
+ if (!s_range.Contains(info.compMethodHash()))
+ {
+ return PhaseStatus::MODIFIED_NOTHING;
+ }
+#endif
+
+ if (!fgMightHaveNaturalLoops)
+ {
+ JITDUMP(" Skipping since this method has no natural loops\n");
+ return PhaseStatus::MODIFIED_NOTHING;
+ }
+
+ bool changed = false;
+
+    // Currently we only do IV widening, which is generally profitable only on
+    // x64, since arm64 addressing modes can include the zero/sign extension of
+    // the index for free.
+ CLANG_FORMAT_COMMENT_ANCHOR;
+#if defined(TARGET_XARCH) && defined(TARGET_64BIT)
+ m_dfsTree = fgComputeDfs();
+ m_loops = FlowGraphNaturalLoops::Find(m_dfsTree);
+
+ ScalarEvolutionContext scevContext(this);
+ JITDUMP("Widening primary induction variables:\n");
+ ArrayStack<Statement*> ivUses(getAllocator(CMK_LoopIVOpts));
+ for (FlowGraphNaturalLoop* loop : m_loops->InReversePostOrder())
+ {
+ JITDUMP("Processing ");
+ DBEXEC(verbose, FlowGraphNaturalLoop::Dump(loop));
+ scevContext.ResetForLoop(loop);
+
+ int numWidened = 0;
+
+ for (Statement* stmt : loop->GetHeader()->Statements())
+ {
+ if (!stmt->IsPhiDefnStmt())
+ {
+ break;
+ }
+
+ JITDUMP("\n");
+
+ DISPSTMT(stmt);
+
+ GenTreeLclVarCommon* lcl = stmt->GetRootNode()->AsLclVarCommon();
+ LclVarDsc* lclDsc = lvaGetDesc(lcl);
+ if (lclDsc->TypeGet() != TYP_INT)
+ {
+ JITDUMP(" Type is %s, no widening to be done\n", varTypeName(lclDsc->TypeGet()));
+ continue;
+ }
+
+ // If the IV is not enregisterable then uses/defs are going to go
+ // to stack regardless. This check also filters out IVs that may be
+ // live into exceptional exits since those are always marked DNER.
+ if (lclDsc->lvDoNotEnregister)
+ {
+ JITDUMP(" V%02u is marked DNER\n", lcl->GetLclNum());
+ continue;
+ }
+
+ Scev* scev = scevContext.Analyze(loop->GetHeader(), stmt->GetRootNode());
+ if (scev == nullptr)
+ {
+ JITDUMP(" Could not analyze header PHI\n");
+ continue;
+ }
+
+ scev = scevContext.Simplify(scev);
+ JITDUMP(" => ");
+ DBEXEC(verbose, scev->Dump(this));
+ JITDUMP("\n");
+ if (!scev->OperIs(ScevOper::AddRec))
+ {
+ JITDUMP(" Not an addrec\n");
+ continue;
+ }
+
+ ScevAddRec* addRec = (ScevAddRec*)scev;
+
+ JITDUMP(" V%02u is a primary induction variable in " FMT_LP "\n", lcl->GetLclNum(), loop->GetIndex());
+
+ if (!optCanSinkWidenedIV(lcl->GetLclNum(), loop))
+ {
+ continue;
+ }
+
+ // Start value should always be an SSA use from outside the loop
+ // since we only widen primary IVs.
+ assert(addRec->Start->OperIs(ScevOper::Local));
+ ScevLocal* startLocal = (ScevLocal*)addRec->Start;
+ int64_t startConstant = 0;
+ bool initToConstant = startLocal->GetConstantValue(this, &startConstant);
+ LclSsaVarDsc* startSsaDsc = lclDsc->GetPerSsaData(startLocal->SsaNum);
+
+ BasicBlock* preheader = loop->EntryEdge(0)->getSourceBlock();
+ BasicBlock* initBlock = preheader;
+ if ((startSsaDsc->GetBlock() != nullptr) && (startSsaDsc->GetDefNode() != nullptr))
+ {
+ initBlock = startSsaDsc->GetBlock();
+ }
+
+ ivUses.Reset();
+ if (!optIsIVWideningProfitable(lcl->GetLclNum(), initBlock, initToConstant, loop, ivUses))
+ {
+ continue;
+ }
+
+ changed = true;
+
+ Statement* insertInitAfter = nullptr;
+ if (initBlock != preheader)
+ {
+ GenTree* narrowInitRoot = startSsaDsc->GetDefNode();
+ while (true)
+ {
+ GenTree* parent = narrowInitRoot->gtGetParent(nullptr);
+ if (parent == nullptr)
+ break;
+
+ narrowInitRoot = parent;
+ }
+
+ for (Statement* stmt : initBlock->Statements())
+ {
+ if (stmt->GetRootNode() == narrowInitRoot)
+ {
+ insertInitAfter = stmt;
+ break;
+ }
+ }
+
+ assert(insertInitAfter != nullptr);
+
+ if (insertInitAfter->IsPhiDefnStmt())
+ {
+ while ((insertInitAfter->GetNextStmt() != nullptr) &&
+ insertInitAfter->GetNextStmt()->IsPhiDefnStmt())
+ {
+ insertInitAfter = insertInitAfter->GetNextStmt();
+ }
+ }
+ }
+
+ Statement* initStmt = nullptr;
+ unsigned newLclNum = lvaGrabTemp(false DEBUGARG(printfAlloc("Widened IV V%02u", lcl->GetLclNum())));
+ INDEBUG(lclDsc = nullptr);
+ assert(startLocal->LclNum == lcl->GetLclNum());
+
+ if (initBlock != preheader)
+ {
+ JITDUMP("Adding initialization of new widened local to same block as reaching def outside loop, " FMT_BB
+ "\n",
+ initBlock->bbNum);
+ }
+ else
+ {
+ JITDUMP("Adding initialization of new widened local to preheader " FMT_BB "\n", initBlock->bbNum);
+ }
+
+ GenTree* initVal;
+ if (initToConstant)
+ {
+ initVal = gtNewIconNode((int64_t)(uint32_t)startConstant, TYP_LONG);
+ }
+ else
+ {
+ initVal = gtNewCastNode(TYP_LONG, gtNewLclvNode(lcl->GetLclNum(), TYP_INT), true, TYP_LONG);
+ }
+
+ GenTree* widenStore = gtNewTempStore(newLclNum, initVal);
+ initStmt = fgNewStmtFromTree(widenStore);
+ if (insertInitAfter != nullptr)
+ {
+ fgInsertStmtAfter(initBlock, insertInitAfter, initStmt);
+ }
+ else
+ {
+ fgInsertStmtNearEnd(initBlock, initStmt);
+ }
+
+ DISPSTMT(initStmt);
+ JITDUMP("\n");
+
+ JITDUMP(" Replacing uses of V%02u with widened version V%02u\n", lcl->GetLclNum(), newLclNum);
+
+ if (initStmt != nullptr)
+ {
+ JITDUMP(" Replacing on the way to the loop\n");
+ optBestEffortReplaceNarrowIVUses(lcl->GetLclNum(), startLocal->SsaNum, newLclNum, initBlock,
+ initStmt->GetNextStmt());
+ }
+
+ JITDUMP(" Replacing in the loop; %d statements with appearances\n", ivUses.Height());
+ for (int i = 0; i < ivUses.Height(); i++)
+ {
+ Statement* stmt = ivUses.Bottom(i);
+ JITDUMP("Replacing V%02u -> V%02u in [%06u]\n", lcl->GetLclNum(), newLclNum,
+ dspTreeID(stmt->GetRootNode()));
+ DISPSTMT(stmt);
+ JITDUMP("\n");
+ optReplaceWidenedIV(lcl->GetLclNum(), SsaConfig::RESERVED_SSA_NUM, newLclNum, stmt);
+ }
+
+ optSinkWidenedIV(lcl->GetLclNum(), newLclNum, loop);
+
+ numWidened++;
+ }
+
+ Metrics.WidenedIVs += numWidened;
+ if (numWidened > 0)
+ {
+ Metrics.LoopsIVWidened++;
+ }
+ }
+
+ fgInvalidateDfsTree();
+#endif
+
+ return changed ? PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING;
+}
diff --git a/src/coreclr/jit/instr.h b/src/coreclr/jit/instr.h
index 3120b2ac87fc..1ff07e430171 100644
--- a/src/coreclr/jit/instr.h
+++ b/src/coreclr/jit/instr.h
@@ -380,6 +380,8 @@ enum insScalableOpts : unsigned
// Removable once REG_V0 and REG_P0 are distinct
INS_SCALABLE_OPTS_UNPREDICATED, // Variants without a predicate (eg add)
INS_SCALABLE_OPTS_UNPREDICATED_WIDE, // Variants without a predicate and wide elements (eg asr)
+ INS_SCALABLE_OPTS_TO_PREDICATE, // Variants moving to a predicate from a vector (e.g. pmov)
+ INS_SCALABLE_OPTS_TO_VECTOR // Variants moving to a vector from a predicate (e.g. pmov)
};
// Maps directly to the pattern used in SVE instructions such as cntb.
diff --git a/src/coreclr/jit/jitconfigvalues.h b/src/coreclr/jit/jitconfigvalues.h
index d84c2f79b7dd..ee8320344ce4 100644
--- a/src/coreclr/jit/jitconfigvalues.h
+++ b/src/coreclr/jit/jitconfigvalues.h
@@ -481,8 +481,9 @@ CONFIG_INTEGER(JitNoRngChks, W("JitNoRngChks"), 0) // If 1, don't generate range
#if defined(OPT_CONFIG)
CONFIG_INTEGER(JitDoAssertionProp, W("JitDoAssertionProp"), 1) // Perform assertion propagation optimization
-CONFIG_INTEGER(JitDoCopyProp, W("JitDoCopyProp"), 1) // Perform copy propagation on variables that appear redundant
-CONFIG_INTEGER(JitDoEarlyProp, W("JitDoEarlyProp"), 1) // Perform Early Value Propagation
+CONFIG_INTEGER(JitDoCopyProp, W("JitDoCopyProp"), 1) // Perform copy propagation on variables that appear redundant
+CONFIG_INTEGER(JitDoOptimizeIVs, W("JitDoOptimizeIVs"), 1) // Perform optimization of induction variables
+CONFIG_INTEGER(JitDoEarlyProp, W("JitDoEarlyProp"), 1) // Perform Early Value Propagation
CONFIG_INTEGER(JitDoLoopHoisting, W("JitDoLoopHoisting"), 1) // Perform loop hoisting on loop invariant values
CONFIG_INTEGER(JitDoLoopInversion, W("JitDoLoopInversion"), 1) // Perform loop inversion on "for/while" loops
CONFIG_INTEGER(JitDoRangeAnalysis, W("JitDoRangeAnalysis"), 1) // Perform range check analysis
@@ -497,6 +498,7 @@ CONFIG_STRING(JitOnlyOptimizeRange,
W("JitOnlyOptimizeRange")) // If set, all methods that do _not_ match are forced into MinOpts
CONFIG_STRING(JitEnablePhysicalPromotionRange, W("JitEnablePhysicalPromotionRange"))
CONFIG_STRING(JitEnableCrossBlockLocalAssertionPropRange, W("JitEnableCrossBlockLocalAssertionPropRange"))
+CONFIG_STRING(JitEnableInductionVariableOptsRange, W("JitEnableInductionVariableOptsRange"))
CONFIG_INTEGER(JitDoSsa, W("JitDoSsa"), 1) // Perform Static Single Assignment (SSA) numbering on the variables
CONFIG_INTEGER(JitDoValueNumber, W("JitDoValueNumber"), 1) // Perform value numbering on method expressions
diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp
index a4eacd9069db..e7195b5c98cb 100644
--- a/src/coreclr/jit/jiteh.cpp
+++ b/src/coreclr/jit/jiteh.cpp
@@ -1984,10 +1984,11 @@ bool Compiler::fgNormalizeEHCase1()
{
// ...then we want to insert an empty, non-removable block outside the try to be the new first block of the
// handler.
- BasicBlock* newHndStart = BasicBlock::New(this, BBJ_ALWAYS, handlerStart);
+ BasicBlock* newHndStart = BasicBlock::New(this);
fgInsertBBbefore(handlerStart, newHndStart);
FlowEdge* newEdge = fgAddRefPred(handlerStart, newHndStart);
newEdge->setLikelihood(1.0);
+ newHndStart->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
// Handler begins have an extra implicit ref count.
// BasicBlock::New has already handled this for newHndStart.
@@ -2154,11 +2155,12 @@ bool Compiler::fgNormalizeEHCase2()
// We've got multiple 'try' blocks starting at the same place!
// Add a new first 'try' block for 'ehOuter' that will be outside 'eh'.
- BasicBlock* newTryStart = BasicBlock::New(this, BBJ_ALWAYS, insertBeforeBlk);
+ BasicBlock* newTryStart = BasicBlock::New(this);
newTryStart->bbRefs = 0;
fgInsertBBbefore(insertBeforeBlk, newTryStart);
FlowEdge* const newEdge = fgAddRefPred(insertBeforeBlk, newTryStart);
newEdge->setLikelihood(1.0);
+ newTryStart->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
// It's possible for a try to start at the beginning of a method. If so, we need
// to adjust the implicit ref counts as we've just created a new first bb
@@ -2346,7 +2348,7 @@ bool Compiler::fgCreateFiltersForGenericExceptions()
// Create a new bb for the fake filter
BasicBlock* handlerBb = eh->ebdHndBeg;
- BasicBlock* filterBb = BasicBlock::New(this, BBJ_EHFILTERRET, handlerBb);
+ BasicBlock* filterBb = BasicBlock::New(this);
// Now we need to spill CATCH_ARG (it should be the first thing evaluated)
GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
@@ -2376,6 +2378,7 @@ bool Compiler::fgCreateFiltersForGenericExceptions()
fgInsertBBbefore(handlerBb, filterBb);
FlowEdge* const newEdge = fgAddRefPred(handlerBb, filterBb);
newEdge->setLikelihood(1.0);
+ filterBb->SetKindAndTargetEdge(BBJ_EHFILTERRET, newEdge);
fgNewStmtAtEnd(filterBb, retFilt, handlerBb->firstStmt()->GetDebugInfo());
filterBb->bbCatchTyp = BBCT_FILTER;
@@ -2632,7 +2635,7 @@ bool Compiler::fgNormalizeEHCase3()
// Add a new last block for 'ehOuter' that will be outside the EH region with which it encloses and
// shares a 'last' pointer
- BasicBlock* newLast = BasicBlock::New(this, BBJ_ALWAYS, insertAfterBlk->Next());
+ BasicBlock* newLast = BasicBlock::New(this);
newLast->bbRefs = 0;
assert(insertAfterBlk != nullptr);
fgInsertBBafter(insertAfterBlk, newLast);
@@ -2683,6 +2686,7 @@ bool Compiler::fgNormalizeEHCase3()
newLast->SetFlags(BBF_INTERNAL | BBF_NONE_QUIRK);
FlowEdge* const newEdge = fgAddRefPred(newLast, insertAfterBlk);
newEdge->setLikelihood(1.0);
+ insertAfterBlk->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
// Move the insert pointer. More enclosing equivalent 'last' blocks will be inserted after this.
insertAfterBlk = newLast;
@@ -4325,8 +4329,8 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
#endif // FEATURE_EH_FUNCLETS
// If this is a handler for a filter, the last block of the filter will end with
- // a BBJ_EHFILTERRET block that has a bbTarget that jumps to the first block of
- // its handler. So we need to update it to keep things in sync.
+ // a BBJ_EHFILTERRET block that jumps to the first block of its handler.
+ // So we need to update it to keep things in sync.
//
if (HBtab->HasFilter())
{
@@ -4337,15 +4341,15 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
#ifdef DEBUG
if (verbose)
{
- printf("EH#%u: Updating bbTarget for filter ret block: " FMT_BB " => " FMT_BB "\n",
- ehGetIndex(HBtab), bFilterLast->bbNum, bPrev->bbNum);
+ printf("EH#%u: Updating target for filter ret block: " FMT_BB " => " FMT_BB "\n", ehGetIndex(HBtab),
+ bFilterLast->bbNum, bPrev->bbNum);
}
#endif // DEBUG
- // Change the bbTarget for bFilterLast from the old first 'block' to the new first 'bPrev'
- fgRemoveRefPred(bFilterLast->GetTarget(), bFilterLast);
- bFilterLast->SetTarget(bPrev);
+ // Change the target for bFilterLast from the old first 'block' to the new first 'bPrev'
+ fgRemoveRefPred(bFilterLast->GetTargetEdge());
FlowEdge* const newEdge = fgAddRefPred(bPrev, bFilterLast);
newEdge->setLikelihood(1.0);
+ bFilterLast->SetTargetEdge(newEdge);
}
}
diff --git a/src/coreclr/jit/jitmetadatalist.h b/src/coreclr/jit/jitmetadatalist.h
index 8b69644d1a8f..f36c15ab9991 100644
--- a/src/coreclr/jit/jitmetadatalist.h
+++ b/src/coreclr/jit/jitmetadatalist.h
@@ -33,6 +33,8 @@ JITMETADATAMETRIC(LoopsCloned, int, 0)
JITMETADATAMETRIC(LoopsUnrolled, int, 0)
JITMETADATAMETRIC(LoopAlignmentCandidates, int, 0)
JITMETADATAMETRIC(LoopsAligned, int, 0)
+JITMETADATAMETRIC(LoopsIVWidened, int, 0)
+JITMETADATAMETRIC(WidenedIVs, int, 0)
JITMETADATAMETRIC(VarsInSsa, int, 0)
JITMETADATAMETRIC(HoistedExpressions, int, 0)
JITMETADATAMETRIC(RedundantBranchesEliminated, int, JIT_METADATA_HIGHER_IS_BETTER)
diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp
index c4a4d44489f0..78fb96fe3d77 100644
--- a/src/coreclr/jit/liveness.cpp
+++ b/src/coreclr/jit/liveness.cpp
@@ -245,7 +245,6 @@ void Compiler::fgPerNodeLocalVarLiveness(GenTree* tree)
case GT_STOREIND:
case GT_STORE_BLK:
- case GT_STORE_DYN_BLK:
case GT_MEMORYBARRIER: // Similar to Volatile indirections, we must handle this as a memory def.
fgCurMemoryDef |= memoryKindSet(GcHeap, ByrefExposed);
break;
@@ -1937,7 +1936,6 @@ void Compiler::fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALAR
case GT_STOREIND:
case GT_BOUNDS_CHECK:
case GT_STORE_BLK:
- case GT_STORE_DYN_BLK:
case GT_JCMP:
case GT_JTEST:
case GT_JCC:
diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp
index 96cd8bff0159..c6d37dc507b9 100644
--- a/src/coreclr/jit/loopcloning.cpp
+++ b/src/coreclr/jit/loopcloning.cpp
@@ -860,17 +860,18 @@ BasicBlock* LoopCloneContext::CondToStmtInBlock(Compiler*
{
for (unsigned i = 0; i < conds.Size(); ++i)
{
- BasicBlock* newBlk = comp->fgNewBBafter(BBJ_COND, insertAfter, /*extendRegion*/ true, slowPreheader);
+ BasicBlock* newBlk = comp->fgNewBBafter(BBJ_COND, insertAfter, /*extendRegion*/ true);
newBlk->inheritWeight(insertAfter);
- JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", newBlk->bbNum, newBlk->GetTrueTarget()->bbNum);
- comp->fgAddRefPred(newBlk->GetTrueTarget(), newBlk);
+ JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", newBlk->bbNum, slowPreheader->bbNum);
+ FlowEdge* const trueEdge = comp->fgAddRefPred(slowPreheader, newBlk);
+ newBlk->SetTrueEdge(trueEdge);
if (insertAfter->KindIs(BBJ_COND))
{
JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", insertAfter->bbNum, newBlk->bbNum);
- insertAfter->SetFalseTarget(newBlk);
- comp->fgAddRefPred(newBlk, insertAfter);
+ FlowEdge* const falseEdge = comp->fgAddRefPred(newBlk, insertAfter);
+ insertAfter->SetFalseEdge(falseEdge);
}
JITDUMP("Adding conditions %u to " FMT_BB "\n", i, newBlk->bbNum);
@@ -894,16 +895,18 @@ BasicBlock* LoopCloneContext::CondToStmtInBlock(Compiler*
}
else
{
- BasicBlock* newBlk = comp->fgNewBBafter(BBJ_COND, insertAfter, /*extendRegion*/ true, slowPreheader);
+ BasicBlock* newBlk = comp->fgNewBBafter(BBJ_COND, insertAfter, /*extendRegion*/ true);
newBlk->inheritWeight(insertAfter);
- JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", newBlk->bbNum, newBlk->GetTrueTarget()->bbNum);
- comp->fgAddRefPred(newBlk->GetTrueTarget(), newBlk);
+ JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", newBlk->bbNum, slowPreheader->bbNum);
+ FlowEdge* const trueEdge = comp->fgAddRefPred(slowPreheader, newBlk);
+ newBlk->SetTrueEdge(trueEdge);
- if (insertAfter->bbFallsThrough())
+ if (insertAfter->KindIs(BBJ_COND))
{
JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", insertAfter->bbNum, newBlk->bbNum);
- comp->fgAddRefPred(newBlk, insertAfter);
+ FlowEdge* const falseEdge = comp->fgAddRefPred(newBlk, insertAfter);
+ insertAfter->SetFalseEdge(falseEdge);
}
JITDUMP("Adding conditions to " FMT_BB "\n", newBlk->bbNum);
@@ -1959,12 +1962,11 @@ void Compiler::optCloneLoop(FlowGraphNaturalLoop* loop, LoopCloneContext* contex
// Make a new pre-header block for the fast loop.
JITDUMP("Create new preheader block for fast loop\n");
- BasicBlock* fastPreheader =
- fgNewBBafter(BBJ_ALWAYS, preheader, /*extendRegion*/ true, /*jumpDest*/ loop->GetHeader());
+ BasicBlock* fastPreheader = fgNewBBafter(BBJ_ALWAYS, preheader, /*extendRegion*/ true);
JITDUMP("Adding " FMT_BB " after " FMT_BB "\n", fastPreheader->bbNum, preheader->bbNum);
fastPreheader->bbWeight = fastPreheader->isRunRarely() ? BB_ZERO_WEIGHT : ambientWeight;
- if (fastPreheader->JumpsToNext())
+ if (fastPreheader->NextIs(loop->GetHeader()))
{
fastPreheader->SetFlags(BBF_NONE_QUIRK);
}
@@ -1972,7 +1974,10 @@ void Compiler::optCloneLoop(FlowGraphNaturalLoop* loop, LoopCloneContext* contex
assert(preheader->KindIs(BBJ_ALWAYS));
assert(preheader->TargetIs(loop->GetHeader()));
- fgReplacePred(loop->GetHeader(), preheader, fastPreheader);
+ FlowEdge* const oldEdge = preheader->GetTargetEdge();
+ fgReplacePred(oldEdge, fastPreheader);
+ fastPreheader->SetTargetEdge(oldEdge);
+
JITDUMP("Replace " FMT_BB " -> " FMT_BB " with " FMT_BB " -> " FMT_BB "\n", preheader->bbNum,
loop->GetHeader()->bbNum, fastPreheader->bbNum, loop->GetHeader()->bbNum);
@@ -2039,9 +2044,12 @@ void Compiler::optCloneLoop(FlowGraphNaturalLoop* loop, LoopCloneContext* contex
// We haven't set the jump target yet
assert(slowPreheader->KindIs(BBJ_ALWAYS));
assert(!slowPreheader->HasInitializedTarget());
- slowPreheader->SetTarget(slowHeader);
- fgAddRefPred(slowHeader, slowPreheader);
+ {
+ FlowEdge* const newEdge = fgAddRefPred(slowHeader, slowPreheader);
+ slowPreheader->SetTargetEdge(newEdge);
+ }
+
JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", slowPreheader->bbNum, slowHeader->bbNum);
BasicBlock* condLast = optInsertLoopChoiceConditions(context, loop, slowPreheader, preheader);
@@ -2049,14 +2057,18 @@ void Compiler::optCloneLoop(FlowGraphNaturalLoop* loop, LoopCloneContext* contex
// Now redirect the old preheader to jump to the first new condition that
// was inserted by the above function.
assert(preheader->KindIs(BBJ_ALWAYS));
- preheader->SetTarget(preheader->Next());
- fgAddRefPred(preheader->Next(), preheader);
+
+ {
+ FlowEdge* const newEdge = fgAddRefPred(preheader->Next(), preheader);
+ preheader->SetTargetEdge(newEdge);
+ }
+
preheader->SetFlags(BBF_NONE_QUIRK);
// And make sure we insert a pred link for the final fallthrough into the fast preheader.
assert(condLast->NextIs(fastPreheader));
- condLast->SetFalseTarget(fastPreheader);
- fgAddRefPred(fastPreheader, condLast);
+ FlowEdge* const falseEdge = fgAddRefPred(fastPreheader, condLast);
+ condLast->SetFalseEdge(falseEdge);
}
//-------------------------------------------------------------------------
diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index 18d7f388e908..29ca8148dbe9 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -570,8 +570,6 @@ GenTree* Lowering::LowerNode(GenTree* node)
LowerStoreSingleRegCallStruct(node->AsBlk());
break;
}
- FALLTHROUGH;
- case GT_STORE_DYN_BLK:
LowerBlockStoreCommon(node->AsBlk());
break;
@@ -861,7 +859,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
{
JITDUMP("Lowering switch " FMT_BB ": single target; converting to BBJ_ALWAYS\n", originalSwitchBB->bbNum);
noway_assert(comp->opts.OptimizationDisabled());
- originalSwitchBB->SetKindAndTarget(BBJ_ALWAYS, jumpTab[0]->getDestinationBlock());
+ originalSwitchBB->SetKindAndTargetEdge(BBJ_ALWAYS, jumpTab[0]);
if (originalSwitchBB->JumpsToNext())
{
@@ -951,7 +949,8 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
// originalSwitchBB is now a BBJ_ALWAYS, and there is a predecessor edge in afterDefaultCondBlock
// representing the fall-through flow from originalSwitchBB.
assert(originalSwitchBB->KindIs(BBJ_ALWAYS));
- assert(originalSwitchBB->NextIs(afterDefaultCondBlock));
+ assert(originalSwitchBB->TargetIs(afterDefaultCondBlock));
+ assert(originalSwitchBB->JumpsToNext());
assert(afterDefaultCondBlock->KindIs(BBJ_SWITCH));
assert(afterDefaultCondBlock->GetSwitchTargets()->bbsHasDefault);
assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet.
@@ -962,10 +961,10 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
// as a predecessor, but the fgSplitBlockAfterStatement() moved all predecessors to point
// to afterDefaultCondBlock.
comp->fgRemoveRefPred(jumpTab[jumpCnt - 1]);
- comp->fgAddRefPred(defaultBB, originalSwitchBB, jumpTab[jumpCnt - 1]);
+ FlowEdge* const trueEdge = comp->fgAddRefPred(defaultBB, originalSwitchBB, jumpTab[jumpCnt - 1]);
// Turn originalSwitchBB into a BBJ_COND.
- originalSwitchBB->SetCond(defaultBB, afterDefaultCondBlock);
+ originalSwitchBB->SetCond(trueEdge, originalSwitchBB->GetTargetEdge());
bool useJumpSequence = jumpCnt < minSwitchTabJumpCnt;
@@ -1014,7 +1013,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
comp->fgRemoveRefPred(uniqueSucc);
}
- afterDefaultCondBlock->SetKindAndTarget(BBJ_ALWAYS, uniqueSucc->getDestinationBlock());
+ afterDefaultCondBlock->SetKindAndTargetEdge(BBJ_ALWAYS, uniqueSucc);
if (afterDefaultCondBlock->JumpsToNext())
{
@@ -1067,10 +1066,10 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
// If we haven't used the afterDefaultCondBlock yet, then use that.
if (fUsedAfterDefaultCondBlock)
{
- BasicBlock* newBlock = comp->fgNewBBafter(BBJ_ALWAYS, currentBlock, true, currentBlock->Next());
+ BasicBlock* newBlock = comp->fgNewBBafter(BBJ_ALWAYS, currentBlock, true);
newBlock->SetFlags(BBF_NONE_QUIRK);
- currentBlock->SetFalseTarget(newBlock);
- comp->fgAddRefPred(newBlock, currentBlock); // The fall-through predecessor.
+ FlowEdge* const falseEdge = comp->fgAddRefPred(newBlock, currentBlock); // The fall-through predecessor.
+ currentBlock->SetFalseEdge(falseEdge);
currentBlock = newBlock;
currentBBRange = &LIR::AsRange(currentBlock);
}
@@ -1081,7 +1080,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
}
// Wire up the predecessor list for the "branch" case.
- comp->fgAddRefPred(targetBlock, currentBlock, jumpTab[i]);
+ FlowEdge* const newEdge = comp->fgAddRefPred(targetBlock, currentBlock, jumpTab[i]);
if (!fAnyTargetFollows && (i == jumpCnt - 2))
{
@@ -1090,13 +1089,14 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
// case: there is no need to compare against the case index, since it's
// guaranteed to be taken (since the default case was handled first, above).
- currentBlock->SetKindAndTarget(BBJ_ALWAYS, targetBlock);
+ currentBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
}
else
{
// Otherwise, it's a conditional branch. Set the branch kind, then add the
// condition statement.
- currentBlock->SetCond(targetBlock, currentBlock->Next());
+ // We will set the false edge in a later iteration of the loop, or after.
+ currentBlock->SetCond(newEdge);
// Now, build the conditional statement for the current case that is
// being evaluated:
@@ -1118,7 +1118,8 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
// There is a fall-through to the following block. In the loop
// above, we deleted all the predecessor edges from the switch.
// In this case, we need to add one back.
- comp->fgAddRefPred(currentBlock->Next(), currentBlock);
+ FlowEdge* const falseEdge = comp->fgAddRefPred(currentBlock->Next(), currentBlock);
+ currentBlock->SetFalseEdge(falseEdge);
}
if (!fUsedAfterDefaultCondBlock)
@@ -1129,7 +1130,8 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
JITDUMP("Lowering switch " FMT_BB ": all switch cases were fall-through\n", originalSwitchBB->bbNum);
assert(currentBlock == afterDefaultCondBlock);
assert(currentBlock->KindIs(BBJ_SWITCH));
- currentBlock->SetKindAndTarget(BBJ_ALWAYS, currentBlock->Next());
+ FlowEdge* const newEdge = comp->fgAddRefPred(currentBlock->Next(), currentBlock);
+ currentBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
currentBlock->RemoveFlags(BBF_DONT_REMOVE);
comp->fgRemoveBlock(currentBlock, /* unreachable */ false); // It's an empty block.
}
@@ -1307,11 +1309,15 @@ bool Lowering::TryLowerSwitchToBitTest(
comp->fgRemoveAllRefPreds(bbCase1, bbSwitch);
comp->fgRemoveAllRefPreds(bbCase0, bbSwitch);
+ // TODO: Use old edges to influence new edge likelihoods?
+ case0Edge = comp->fgAddRefPred(bbCase0, bbSwitch);
+ case1Edge = comp->fgAddRefPred(bbCase1, bbSwitch);
+
if (bbSwitch->NextIs(bbCase0))
{
// GenCondition::C generates JC so we jump to bbCase1 when the bit is set
bbSwitchCondition = GenCondition::C;
- bbSwitch->SetCond(bbCase1, bbCase0);
+ bbSwitch->SetCond(case1Edge, case0Edge);
}
else
{
@@ -1319,13 +1325,9 @@ bool Lowering::TryLowerSwitchToBitTest(
// GenCondition::NC generates JNC so we jump to bbCase0 when the bit is not set
bbSwitchCondition = GenCondition::NC;
- bbSwitch->SetCond(bbCase0, bbCase1);
+ bbSwitch->SetCond(case0Edge, case1Edge);
}
- // TODO: Use old edges to influence new edge likelihoods?
- comp->fgAddRefPred(bbCase0, bbSwitch);
- comp->fgAddRefPred(bbCase1, bbSwitch);
-
var_types bitTableType = (bitCount <= (genTypeSize(TYP_INT) * 8)) ? TYP_INT : TYP_LONG;
GenTree* bitTableIcon = comp->gtNewIconNode(bitTable, bitTableType);
@@ -1842,6 +1844,157 @@ GenTree* Lowering::AddrGen(void* addr)
return AddrGen((ssize_t)addr);
}
+//------------------------------------------------------------------------
+// LowerCallMemset: Replaces the following memset-like special intrinsics:
+//
+// SpanHelpers.Fill<T>(ref dstRef, CNS_SIZE, CNS_VALUE)
+// CORINFO_HELP_MEMSET(ref dstRef, CNS_VALUE, CNS_SIZE)
+// SpanHelpers.ClearWithoutReferences(ref dstRef, CNS_SIZE)
+//
+// with a GT_STORE_BLK node:
+//
+// * STORE_BLK struct<CNS_SIZE> (init) (Unroll)
+// +--* LCL_VAR byref dstRef
+// \--* CNS_INT int 0
+//
+// Arguments:
+//    call - GenTreeCall node to replace with STORE_BLK
+// next - [out] Next node to lower if this function returns true
+//
+// Return Value:
+// false if no changes were made
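+//
+//    As a hypothetical example, a call such as
+//    SpanHelpers.Fill<byte>(ref p, 32, 0xFF) would be replaced by a 32-byte
+//    unrolled STORE_BLK initializing the memory with 0xFF, assuming the
+//    Memset unroll threshold permits a size of 32.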
+//
+bool Lowering::LowerCallMemset(GenTreeCall* call, GenTree** next)
+{
+ assert(call->IsSpecialIntrinsic(comp, NI_System_SpanHelpers_Fill) ||
+ call->IsSpecialIntrinsic(comp, NI_System_SpanHelpers_ClearWithoutReferences) ||
+ call->IsHelperCall(comp, CORINFO_HELP_MEMSET));
+
+ JITDUMP("Considering Memset-like call [%06d] for unrolling.. ", comp->dspTreeID(call))
+
+ if (comp->info.compHasNextCallRetAddr)
+ {
+ JITDUMP("compHasNextCallRetAddr=true so we won't be able to remove the call - bail out.\n");
+ return false;
+ }
+
+ GenTree* dstRefArg = call->gtArgs.GetUserArgByIndex(0)->GetNode();
+ GenTree* lengthArg;
+ GenTree* valueArg;
+
+ // Fill<T>'s length is not in bytes, so we need to scale it depending on the signature
+ unsigned lengthScale;
+
+ if (call->IsSpecialIntrinsic(comp, NI_System_SpanHelpers_Fill))
+ {
+ // void SpanHelpers::Fill<T>(ref T refData, nuint numElements, T value)
+ //
+ assert(call->gtArgs.CountUserArgs() == 3);
+ lengthArg = call->gtArgs.GetUserArgByIndex(1)->GetNode();
+ CallArg* valueCallArg = call->gtArgs.GetUserArgByIndex(2);
+ valueArg = valueCallArg->GetNode();
+
+ // Get that <T> from the signature
+ lengthScale = genTypeSize(valueCallArg->GetSignatureType());
+ // NOTE: structs and TYP_REF will be ignored by the "Value is not a constant" check
+        // Some of those cases can be enabled in the future.
+ }
+ else if (call->IsHelperCall(comp, CORINFO_HELP_MEMSET))
+ {
+ // void CORINFO_HELP_MEMSET(ref T refData, byte value, nuint numElements)
+ //
+ assert(call->gtArgs.CountUserArgs() == 3);
+ lengthArg = call->gtArgs.GetUserArgByIndex(2)->GetNode();
+ valueArg = call->gtArgs.GetUserArgByIndex(1)->GetNode();
+ lengthScale = 1; // it's always in bytes
+ }
+ else
+ {
+ // void SpanHelpers::ClearWithoutReferences(ref byte b, nuint byteLength)
+ //
+ assert(call->IsSpecialIntrinsic(comp, NI_System_SpanHelpers_ClearWithoutReferences));
+ assert(call->gtArgs.CountUserArgs() == 2);
+
+ // Simple zeroing
+ lengthArg = call->gtArgs.GetUserArgByIndex(1)->GetNode();
+ valueArg = comp->gtNewZeroConNode(TYP_INT);
+ lengthScale = 1; // it's always in bytes
+ }
+
+ if (!lengthArg->IsIntegralConst())
+ {
+ JITDUMP("Length is not a constant - bail out.\n");
+ return false;
+ }
+
+ if (!valueArg->IsCnsIntOrI() || !valueArg->TypeIs(TYP_INT))
+ {
+ JITDUMP("Value is not a constant - bail out.\n");
+ return false;
+ }
+
+ // If value is not zero, we can only unroll for single-byte values
+ if (!valueArg->IsIntegralConst(0) && (lengthScale != 1))
+ {
+ JITDUMP("Value is not unroll-friendly - bail out.\n");
+ return false;
+ }
+
+ // Convert lenCns to bytes
+ ssize_t lenCns = lengthArg->AsIntCon()->IconValue();
+ if (CheckedOps::MulOverflows((target_ssize_t)lenCns, (target_ssize_t)lengthScale, CheckedOps::Signed))
+ {
+ // lenCns overflows
+ JITDUMP("lenCns * lengthScale overflows - bail out.\n")
+ return false;
+ }
+ lenCns *= (ssize_t)lengthScale;
+
+ // TODO-CQ: drop the whole thing in case of lenCns = 0
+ if ((lenCns <= 0) || (lenCns > (ssize_t)comp->getUnrollThreshold(Compiler::UnrollKind::Memset)))
+ {
+ JITDUMP("Size is either 0 or too big to unroll - bail out.\n")
+ return false;
+ }
+
+ JITDUMP("Accepted for unrolling!\nOld tree:\n");
+ DISPTREERANGE(BlockRange(), call);
+
+ if (!valueArg->IsIntegralConst(0))
+ {
+ // Non-zero (byte) value, wrap value with GT_INIT_VAL
+ GenTree* initVal = valueArg;
+ valueArg = comp->gtNewOperNode(GT_INIT_VAL, TYP_INT, initVal);
+ BlockRange().InsertAfter(initVal, valueArg);
+ }
+
+ GenTreeBlk* storeBlk =
+ comp->gtNewStoreBlkNode(comp->typGetBlkLayout((unsigned)lenCns), dstRefArg, valueArg, GTF_IND_UNALIGNED);
+ storeBlk->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
+
+ // Insert/Remove trees into LIR
+ BlockRange().InsertBefore(call, storeBlk);
+ if (call->IsSpecialIntrinsic(comp, NI_System_SpanHelpers_ClearWithoutReferences))
+ {
+ // Value didn't exist in LIR previously
+ BlockRange().InsertBefore(storeBlk, valueArg);
+ }
+
+ // Remove the call and mark everything as unused ...
+ BlockRange().Remove(call, true);
+ // ... except the args we're going to re-use
+ dstRefArg->ClearUnusedValue();
+ valueArg->ClearUnusedValue();
+ if (valueArg->OperIs(GT_INIT_VAL))
+ {
+ valueArg->gtGetOp1()->ClearUnusedValue();
+ }
+
+ JITDUMP("\nNew tree:\n");
+ DISPTREERANGE(BlockRange(), storeBlk);
+ *next = storeBlk;
+ return true;
+}
+
//------------------------------------------------------------------------
// LowerCallMemmove: Replace Buffer.Memmove(DST, SRC, CNS_SIZE) with a GT_STORE_BLK:
// Do the same for CORINFO_HELP_MEMCPY(DST, SRC, CNS_SIZE)
@@ -1862,7 +2015,7 @@ bool Lowering::LowerCallMemmove(GenTreeCall* call, GenTree** next)
{
JITDUMP("Considering Memmove [%06d] for unrolling.. ", comp->dspTreeID(call))
assert(call->IsHelperCall(comp, CORINFO_HELP_MEMCPY) ||
- (comp->lookupNamedIntrinsic(call->gtCallMethHnd) == NI_System_Buffer_Memmove));
+ (comp->lookupNamedIntrinsic(call->gtCallMethHnd) == NI_System_SpanHelpers_Memmove));
assert(call->gtArgs.CountUserArgs() == 3);
@@ -2221,11 +2374,32 @@ GenTree* Lowering::LowerCall(GenTree* node)
GenTree* nextNode = nullptr;
if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
{
- NamedIntrinsic ni = comp->lookupNamedIntrinsic(call->gtCallMethHnd);
- if (((ni == NI_System_Buffer_Memmove) && LowerCallMemmove(call, &nextNode)) ||
- ((ni == NI_System_SpanHelpers_SequenceEqual) && LowerCallMemcmp(call, &nextNode)))
+ switch (comp->lookupNamedIntrinsic(call->gtCallMethHnd))
{
- return nextNode;
+ case NI_System_SpanHelpers_Memmove:
+ if (LowerCallMemmove(call, &nextNode))
+ {
+ return nextNode;
+ }
+ break;
+
+ case NI_System_SpanHelpers_SequenceEqual:
+ if (LowerCallMemcmp(call, &nextNode))
+ {
+ return nextNode;
+ }
+ break;
+
+ case NI_System_SpanHelpers_Fill:
+ case NI_System_SpanHelpers_ClearWithoutReferences:
+ if (LowerCallMemset(call, &nextNode))
+ {
+ return nextNode;
+ }
+ break;
+
+ default:
+ break;
}
}
@@ -2234,6 +2408,12 @@ GenTree* Lowering::LowerCall(GenTree* node)
{
return nextNode;
}
+
+ // Try to lower CORINFO_HELP_MEMSET to unrollable STORE_BLK
+ if (call->IsHelperCall(comp, CORINFO_HELP_MEMSET) && LowerCallMemset(call, &nextNode))
+ {
+ return nextNode;
+ }
#endif
call->ClearOtherRegs();
@@ -5819,6 +5999,26 @@ GenTree* Lowering::LowerNonvirtPinvokeCall(GenTreeCall* call)
InsertPInvokeCallEpilog(call);
}
+#ifdef SWIFT_SUPPORT
+ // For Swift calls that require error handling, ensure the GT_SWIFT_ERROR node
+ // that consumes the error register is the call node's successor.
+ // This is to simplify logic for marking the error register as busy in LSRA.
+ if ((call->gtCallMoreFlags & GTF_CALL_M_SWIFT_ERROR_HANDLING) != 0)
+ {
+ GenTree* swiftErrorNode = call->gtNext;
+ assert(swiftErrorNode != nullptr);
+
+ while (!swiftErrorNode->OperIs(GT_SWIFT_ERROR))
+ {
+ swiftErrorNode = swiftErrorNode->gtNext;
+ assert(swiftErrorNode != nullptr);
+ }
+
+ BlockRange().Remove(swiftErrorNode);
+ BlockRange().InsertAfter(call, swiftErrorNode);
+ }
+#endif // SWIFT_SUPPORT
+
return result;
}
@@ -7930,24 +8130,27 @@ void Lowering::LowerBlockStoreAsHelperCall(GenTreeBlk* blkNode)
}
}
- if (blkNode->OperIs(GT_STORE_DYN_BLK))
- {
- // Size is not a constant
- size = blkNode->AsStoreDynBlk()->gtDynamicSize;
- }
- else
- {
- // Size is a constant
- size = comp->gtNewIconNode(blkNode->Size(), TYP_I_IMPL);
- BlockRange().InsertBefore(data, size);
- }
+ // Size is a constant
+ size = comp->gtNewIconNode(blkNode->Size(), TYP_I_IMPL);
+ BlockRange().InsertBefore(data, size);
// A hacky way to safely call fgMorphTree in Lower
GenTree* destPlaceholder = comp->gtNewZeroConNode(dest->TypeGet());
GenTree* dataPlaceholder = comp->gtNewZeroConNode(genActualType(data));
GenTree* sizePlaceholder = comp->gtNewZeroConNode(genActualType(size));
- GenTreeCall* call = comp->gtNewHelperCallNode(helper, TYP_VOID, destPlaceholder, dataPlaceholder, sizePlaceholder);
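+    // If the helper is CORINFO_HELP_MEMSET and the value is a constant zero,
+    // call CORINFO_HELP_MEMZERO instead, which avoids passing the value
+    // argument altogether.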
+ const bool isMemzero = helper == CORINFO_HELP_MEMSET ? data->IsIntegralConst(0) : false;
+
+ GenTreeCall* call;
+ if (isMemzero)
+ {
+ BlockRange().Remove(data);
+ call = comp->gtNewHelperCallNode(CORINFO_HELP_MEMZERO, TYP_VOID, destPlaceholder, sizePlaceholder);
+ }
+ else
+ {
+ call = comp->gtNewHelperCallNode(helper, TYP_VOID, destPlaceholder, dataPlaceholder, sizePlaceholder);
+ }
comp->fgMorphArgs(call);
LIR::Range range = LIR::SeqTree(comp, call);
@@ -7958,18 +8161,22 @@ void Lowering::LowerBlockStoreAsHelperCall(GenTreeBlk* blkNode)
blkNode->gtBashToNOP();
LIR::Use destUse;
- LIR::Use dataUse;
LIR::Use sizeUse;
BlockRange().TryGetUse(destPlaceholder, &destUse);
- BlockRange().TryGetUse(dataPlaceholder, &dataUse);
BlockRange().TryGetUse(sizePlaceholder, &sizeUse);
destUse.ReplaceWith(dest);
- dataUse.ReplaceWith(data);
sizeUse.ReplaceWith(size);
destPlaceholder->SetUnusedValue();
- dataPlaceholder->SetUnusedValue();
sizePlaceholder->SetUnusedValue();
+ if (!isMemzero)
+ {
+ LIR::Use dataUse;
+ BlockRange().TryGetUse(dataPlaceholder, &dataUse);
+ dataUse.ReplaceWith(data);
+ dataPlaceholder->SetUnusedValue();
+ }
+
LowerRange(rangeStart, rangeEnd);
// Finally move all GT_PUTARG_* nodes
@@ -7977,8 +8184,11 @@ void Lowering::LowerBlockStoreAsHelperCall(GenTreeBlk* blkNode)
MoveCFGCallArgs(call);
BlockRange().Remove(destPlaceholder);
- BlockRange().Remove(dataPlaceholder);
BlockRange().Remove(sizePlaceholder);
+ if (!isMemzero)
+ {
+ BlockRange().Remove(dataPlaceholder);
+ }
// Wrap with memory barriers on weak memory models
// if the block store was volatile
@@ -9011,13 +9221,10 @@ void Lowering::LowerLclHeap(GenTree* node)
//
void Lowering::LowerBlockStoreCommon(GenTreeBlk* blkNode)
{
- assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK));
+ assert(blkNode->OperIs(GT_STORE_BLK));
if (blkNode->ContainsReferences() && !blkNode->OperIsCopyBlkOp())
{
- // Make sure we don't use GT_STORE_DYN_BLK
- assert(blkNode->OperIs(GT_STORE_BLK));
-
// and we only zero it (and that zero is better to be not hoisted/CSE'd)
assert(blkNode->Data()->IsIntegralConst(0));
}
@@ -9053,17 +9260,12 @@ void Lowering::LowerBlockStoreCommon(GenTreeBlk* blkNode)
//
bool Lowering::TryTransformStoreObjAsStoreInd(GenTreeBlk* blkNode)
{
- assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK));
+ assert(blkNode->OperIs(GT_STORE_BLK));
if (!comp->opts.OptimizationEnabled())
{
return false;
}
- if (blkNode->OperIs(GT_STORE_DYN_BLK))
- {
- return false;
- }
-
var_types regType = blkNode->GetLayout()->GetRegisterType();
if (regType == TYP_UNDEF)
{
diff --git a/src/coreclr/jit/lower.h b/src/coreclr/jit/lower.h
index 5156b8899b80..095e108633b1 100644
--- a/src/coreclr/jit/lower.h
+++ b/src/coreclr/jit/lower.h
@@ -140,6 +140,7 @@ private:
GenTree* LowerCall(GenTree* call);
bool LowerCallMemmove(GenTreeCall* call, GenTree** next);
bool LowerCallMemcmp(GenTreeCall* call, GenTree** next);
+ bool LowerCallMemset(GenTreeCall* call, GenTree** next);
void LowerCFGCall(GenTreeCall* call);
void MoveCFGCallArgs(GenTreeCall* call);
void MoveCFGCallArg(GenTreeCall* call, GenTree* node);
diff --git a/src/coreclr/jit/lowerarmarch.cpp b/src/coreclr/jit/lowerarmarch.cpp
index 4cc7144ad15d..df987c1aeab5 100644
--- a/src/coreclr/jit/lowerarmarch.cpp
+++ b/src/coreclr/jit/lowerarmarch.cpp
@@ -585,8 +585,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
src = src->AsUnOp()->gtGetOp1();
}
- if (!blkNode->OperIs(GT_STORE_DYN_BLK) && (size <= comp->getUnrollThreshold(Compiler::UnrollKind::Memset)) &&
- src->OperIs(GT_CNS_INT))
+ if ((size <= comp->getUnrollThreshold(Compiler::UnrollKind::Memset)) && src->OperIs(GT_CNS_INT))
{
blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
@@ -651,7 +650,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
}
ClassLayout* layout = blkNode->GetLayout();
- bool doCpObj = !blkNode->OperIs(GT_STORE_DYN_BLK) && layout->HasGCPtr();
+ bool doCpObj = layout->HasGCPtr();
unsigned copyBlockUnrollLimit = comp->getUnrollThreshold(Compiler::UnrollKind::Memcpy);
if (doCpObj && (size <= copyBlockUnrollLimit))
@@ -686,7 +685,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
}
else
{
- assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK));
+ assert(blkNode->OperIs(GT_STORE_BLK));
LowerBlockStoreAsHelperCall(blkNode);
}
}
diff --git a/src/coreclr/jit/lowerloongarch64.cpp b/src/coreclr/jit/lowerloongarch64.cpp
index 507ef59256ca..7b3fb43dfa5d 100644
--- a/src/coreclr/jit/lowerloongarch64.cpp
+++ b/src/coreclr/jit/lowerloongarch64.cpp
@@ -296,8 +296,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
src = src->AsUnOp()->gtGetOp1();
}
- if (!blkNode->OperIs(GT_STORE_DYN_BLK) && (size <= comp->getUnrollThreshold(Compiler::UnrollKind::Memset)) &&
- src->OperIs(GT_CNS_INT))
+ if ((size <= comp->getUnrollThreshold(Compiler::UnrollKind::Memset)) && src->OperIs(GT_CNS_INT))
{
blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
@@ -351,7 +350,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
}
ClassLayout* layout = blkNode->GetLayout();
- bool doCpObj = !blkNode->OperIs(GT_STORE_DYN_BLK) && layout->HasGCPtr();
+ bool doCpObj = layout->HasGCPtr();
unsigned copyBlockUnrollLimit = comp->getUnrollThreshold(Compiler::UnrollKind::Memcpy);
if (doCpObj && (size <= copyBlockUnrollLimit))
@@ -387,7 +386,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
}
else
{
- assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK));
+ assert(blkNode->OperIs(GT_STORE_BLK));
LowerBlockStoreAsHelperCall(blkNode);
}
}
diff --git a/src/coreclr/jit/lowerriscv64.cpp b/src/coreclr/jit/lowerriscv64.cpp
index 405b9707c411..d172ded69917 100644
--- a/src/coreclr/jit/lowerriscv64.cpp
+++ b/src/coreclr/jit/lowerriscv64.cpp
@@ -245,7 +245,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
src = src->AsUnOp()->gtGetOp1();
}
- if (!blkNode->OperIs(GT_STORE_DYN_BLK) && (size <= INITBLK_UNROLL_LIMIT) && src->OperIs(GT_CNS_INT))
+ if ((size <= INITBLK_UNROLL_LIMIT) && src->OperIs(GT_CNS_INT))
{
blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
@@ -299,7 +299,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
}
ClassLayout* layout = blkNode->GetLayout();
- bool doCpObj = !blkNode->OperIs(GT_STORE_DYN_BLK) && layout->HasGCPtr();
+ bool doCpObj = layout->HasGCPtr();
if (doCpObj && (size <= CPBLK_UNROLL_LIMIT))
{
@@ -334,7 +334,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
}
else
{
- assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK));
+ assert(blkNode->OperIs(GT_STORE_BLK));
LowerBlockStoreAsHelperCall(blkNode);
}
}
diff --git a/src/coreclr/jit/lowerxarch.cpp b/src/coreclr/jit/lowerxarch.cpp
index ea4db9fedbac..e0ca67574e1c 100644
--- a/src/coreclr/jit/lowerxarch.cpp
+++ b/src/coreclr/jit/lowerxarch.cpp
@@ -349,7 +349,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
src = src->AsUnOp()->gtGetOp1();
}
- if (!blkNode->OperIs(GT_STORE_DYN_BLK) && (size <= comp->getUnrollThreshold(Compiler::UnrollKind::Memset)))
+ if (size <= comp->getUnrollThreshold(Compiler::UnrollKind::Memset))
{
if (!src->OperIs(GT_CNS_INT))
{
@@ -436,7 +436,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
}
ClassLayout* layout = blkNode->GetLayout();
- bool doCpObj = !blkNode->OperIs(GT_STORE_DYN_BLK) && layout->HasGCPtr();
+ bool doCpObj = layout->HasGCPtr();
unsigned copyBlockUnrollLimit = comp->getUnrollThreshold(Compiler::UnrollKind::Memcpy, false);
#ifndef JIT32_GCENCODER
@@ -516,7 +516,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
}
else
{
- assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK));
+ assert(blkNode->OperIs(GT_STORE_BLK));
#ifdef TARGET_AMD64
LowerBlockStoreAsHelperCall(blkNode);
diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp
index 9ca1d59ed7a5..e411de81ed80 100644
--- a/src/coreclr/jit/lsra.cpp
+++ b/src/coreclr/jit/lsra.cpp
@@ -5109,6 +5109,13 @@ void LinearScan::allocateRegistersMinimal()
}
regsInUseThisLocation |= currentRefPosition.registerAssignment;
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_FIXED_REG, nullptr, currentRefPosition.assignedReg()));
+
+#ifdef SWIFT_SUPPORT
+ if (currentRefPosition.delayRegFree)
+ {
+ regsInUseNextLocation |= currentRefPosition.registerAssignment;
+ }
+#endif // SWIFT_SUPPORT
}
else
{
@@ -5818,6 +5825,13 @@ void LinearScan::allocateRegisters()
}
regsInUseThisLocation |= currentRefPosition.registerAssignment;
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_FIXED_REG, nullptr, currentRefPosition.assignedReg()));
+
+#ifdef SWIFT_SUPPORT
+ if (currentRefPosition.delayRegFree)
+ {
+ regsInUseNextLocation |= currentRefPosition.registerAssignment;
+ }
+#endif // SWIFT_SUPPORT
}
else
{
@@ -11745,8 +11759,7 @@ void LinearScan::verifyFinalAllocation()
}
}
- LsraLocation newLocation = currentRefPosition.nodeLocation;
- currentLocation = newLocation;
+ currentLocation = currentRefPosition.nodeLocation;
switch (currentRefPosition.refType)
{
diff --git a/src/coreclr/jit/lsraarm.cpp b/src/coreclr/jit/lsraarm.cpp
index 30991778868d..2192265984d6 100644
--- a/src/coreclr/jit/lsraarm.cpp
+++ b/src/coreclr/jit/lsraarm.cpp
@@ -579,7 +579,6 @@ int LinearScan::BuildNode(GenTree* tree)
break;
case GT_STORE_BLK:
- case GT_STORE_DYN_BLK:
srcCount = BuildBlockStore(tree->AsBlk());
break;
diff --git a/src/coreclr/jit/lsraarm64.cpp b/src/coreclr/jit/lsraarm64.cpp
index ea3bc9d7fb37..52db29fd95ab 100644
--- a/src/coreclr/jit/lsraarm64.cpp
+++ b/src/coreclr/jit/lsraarm64.cpp
@@ -1076,7 +1076,6 @@ int LinearScan::BuildNode(GenTree* tree)
break;
case GT_STORE_BLK:
- case GT_STORE_DYN_BLK:
srcCount = BuildBlockStore(tree->AsBlk());
break;
@@ -1282,6 +1281,20 @@ int LinearScan::BuildNode(GenTree* tree)
srcCount = BuildSelect(tree->AsOp());
break;
+#ifdef SWIFT_SUPPORT
+ case GT_SWIFT_ERROR:
+ srcCount = 0;
+ assert(dstCount == 1);
+
+ // Any register should do here, but the error register value should immediately
+ // be moved from GT_SWIFT_ERROR's destination register to the SwiftError struct,
+ // and we know REG_SWIFT_ERROR should be busy up to this point, anyway.
+ // By forcing LSRA to use REG_SWIFT_ERROR as both the source and destination register,
+ // we can ensure the redundant move is elided.
+ BuildDef(tree, RBM_SWIFT_ERROR);
+ break;
+#endif // SWIFT_SUPPORT
+
} // end switch (tree->OperGet())
if (tree->IsUnusedValue() && (dstCount != 0))
@@ -1684,6 +1697,7 @@ int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree, int* pDstCou
assert(intrinsicTree->OperIsMemoryLoadOrStore());
srcCount += BuildAddrUses(intrin.op3);
+ buildInternalRegisterUses();
FALLTHROUGH;
}
@@ -2024,17 +2038,30 @@ bool RefPosition::isLiveAtConsecutiveRegistersLoc(LsraLocation consecutiveRegist
return true;
}
+ bool atConsecutiveRegsLoc = consecutiveRegistersLocation == nodeLocation;
+ bool treeNeedsConsecutiveRegisters = false;
+
+ if ((treeNode != nullptr) && treeNode->OperIsHWIntrinsic())
+ {
+ const HWIntrinsic intrin(treeNode->AsHWIntrinsic());
+ treeNeedsConsecutiveRegisters = HWIntrinsicInfo::NeedsConsecutiveRegisters(intrin.id);
+ }
+
if (refType == RefTypeDef)
{
- if (treeNode->OperIsHWIntrinsic())
+ return treeNeedsConsecutiveRegisters;
+ }
+ else if (refType == RefTypeUse)
+ {
+ if (isIntervalRef() && getInterval()->isInternal)
{
- const HWIntrinsic intrin(treeNode->AsHWIntrinsic());
- return HWIntrinsicInfo::NeedsConsecutiveRegisters(intrin.id);
+ return treeNeedsConsecutiveRegisters;
}
+ return atConsecutiveRegsLoc;
}
- else if ((refType == RefTypeUse) || (refType == RefTypeUpperVectorRestore))
+ else if (refType == RefTypeUpperVectorRestore)
{
- return consecutiveRegistersLocation == nodeLocation;
+ return atConsecutiveRegsLoc;
}
return false;
}
diff --git a/src/coreclr/jit/lsraarmarch.cpp b/src/coreclr/jit/lsraarmarch.cpp
index ad112a817220..1df68f5f3f57 100644
--- a/src/coreclr/jit/lsraarmarch.cpp
+++ b/src/coreclr/jit/lsraarmarch.cpp
@@ -393,6 +393,30 @@ int LinearScan::BuildCall(GenTreeCall* call)
regMaskTP killMask = getKillSetForCall(call);
BuildDefsWithKills(call, dstCount, dstCandidates, killMask);
+#ifdef SWIFT_SUPPORT
+ if ((call->gtCallMoreFlags & GTF_CALL_M_SWIFT_ERROR_HANDLING) != 0)
+ {
+ // Tree is a Swift call with error handling; error register should have been killed
+ assert(call->unmgdCallConv == CorInfoCallConvExtension::Swift);
+ assert((killMask & RBM_SWIFT_ERROR) != 0);
+
+ // After a potentially-throwing Swift call returns, we expect the error register to be consumed
+ // by a GT_SWIFT_ERROR node. However, we want to ensure the error register won't be trashed
+ // before GT_SWIFT_ERROR can consume it.
+ // (For example, the PInvoke epilog comes before the error register store.)
+ // To do so, delay the freeing of the error register until the next node.
+ // This only works if the next node after the call is the GT_SWIFT_ERROR node.
+ // (InsertPInvokeCallEpilog should have moved the GT_SWIFT_ERROR node during lowering.)
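+ //
+ // As an illustrative sketch, the expected LIR shape at this point is:
+ //   CALL (Swift)    ; kills REG_SWIFT_ERROR; its free is delayed one location
+ //   GT_SWIFT_ERROR  ; defines REG_SWIFT_ERROR, later stored to the SwiftError struct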
+ assert(call->gtNext != nullptr);
+ assert(call->gtNext->OperIs(GT_SWIFT_ERROR));
+
+ // We could use RefTypeKill, but RefTypeFixedReg is used less commonly, so the check for delayRegFree
+ // during register allocation should be cheaper in terms of TP.
+ RefPosition* pos = newRefPosition(REG_SWIFT_ERROR, currentLoc, RefTypeFixedReg, call, RBM_SWIFT_ERROR);
+ setDelayFree(pos);
+ }
+#endif // SWIFT_SUPPORT
+
// No args are placed in registers anymore.
placedArgRegs = RBM_NONE;
numPlacedArgLocals = 0;
@@ -781,7 +805,7 @@ int LinearScan::BuildBlockStore(GenTreeBlk* blkNode)
}
}
- if (!blkNode->OperIs(GT_STORE_DYN_BLK) && (sizeRegMask != RBM_NONE))
+ if (sizeRegMask != RBM_NONE)
{
// Reserve a temp register for the block size argument.
buildInternalIntRegisterDefForNode(blkNode, sizeRegMask);
@@ -812,12 +836,6 @@ int LinearScan::BuildBlockStore(GenTreeBlk* blkNode)
}
}
- if (blkNode->OperIs(GT_STORE_DYN_BLK))
- {
- useCount++;
- BuildUse(blkNode->AsStoreDynBlk()->gtDynamicSize, sizeRegMask);
- }
-
buildInternalRegisterUses();
regMaskTP killMask = getKillSetForBlockStore(blkNode);
BuildDefsWithKills(blkNode, 0, RBM_NONE, killMask);
diff --git a/src/coreclr/jit/lsrabuild.cpp b/src/coreclr/jit/lsrabuild.cpp
index 3b9ec7f388ae..6e746d35c93a 100644
--- a/src/coreclr/jit/lsrabuild.cpp
+++ b/src/coreclr/jit/lsrabuild.cpp
@@ -880,6 +880,19 @@ regMaskTP LinearScan::getKillSetForCall(GenTreeCall* call)
assert(!call->IsVirtualStub() ||
((killMask & compiler->virtualStubParamInfo->GetRegMask()) == compiler->virtualStubParamInfo->GetRegMask()));
#endif // !TARGET_ARM
+
+#ifdef SWIFT_SUPPORT
+ // Swift calls that throw may trash the callee-saved error register,
+ // so don't use the register post-call until it is consumed by SwiftError.
+ // GTF_CALL_M_SWIFT_ERROR_HANDLING indicates the call has a SwiftError* argument,
+ // so the error register value will eventually be consumed post-call.
+ if ((call->gtCallMoreFlags & GTF_CALL_M_SWIFT_ERROR_HANDLING) != 0)
+ {
+ assert(call->unmgdCallConv == CorInfoCallConvExtension::Swift);
+ killMask |= RBM_SWIFT_ERROR;
+ }
+#endif // SWIFT_SUPPORT
+
return killMask;
}
@@ -1043,7 +1056,6 @@ regMaskTP LinearScan::getKillSetForNode(GenTree* tree)
break;
case GT_STORE_BLK:
- case GT_STORE_DYN_BLK:
killMask = getKillSetForBlockStore(tree->AsBlk());
break;
@@ -2481,7 +2493,7 @@ void LinearScan::buildIntervals()
killed = RBM_EDI | RBM_ECX | RBM_EAX;
#else
// Poisoning uses REG_SCRATCH for small vars and memset helper for big vars.
- killed = genRegMask(REG_SCRATCH) | compiler->compHelperCallKillSet(CORINFO_HELP_MEMSET);
+ killed = genRegMask(REG_SCRATCH) | compiler->compHelperCallKillSet(CORINFO_HELP_NATIVE_MEMSET);
#endif
addRefsForPhysRegMask(killed, currentLoc + 1, RefTypeKill, true);
currentLoc += 2;
diff --git a/src/coreclr/jit/lsraloongarch64.cpp b/src/coreclr/jit/lsraloongarch64.cpp
index b6e3b53d63ee..51b1871b40bd 100644
--- a/src/coreclr/jit/lsraloongarch64.cpp
+++ b/src/coreclr/jit/lsraloongarch64.cpp
@@ -394,7 +394,6 @@ int LinearScan::BuildNode(GenTree* tree)
break;
case GT_STORE_BLK:
- case GT_STORE_DYN_BLK:
srcCount = BuildBlockStore(tree->AsBlk());
break;
@@ -1157,7 +1156,7 @@ int LinearScan::BuildBlockStore(GenTreeBlk* blkNode)
}
}
- if (!blkNode->OperIs(GT_STORE_DYN_BLK) && (sizeRegMask != RBM_NONE))
+ if (sizeRegMask != RBM_NONE)
{
// Reserve a temp register for the block size argument.
buildInternalIntRegisterDefForNode(blkNode, sizeRegMask);
@@ -1188,12 +1187,6 @@ int LinearScan::BuildBlockStore(GenTreeBlk* blkNode)
}
}
- if (blkNode->OperIs(GT_STORE_DYN_BLK))
- {
- useCount++;
- BuildUse(blkNode->AsStoreDynBlk()->gtDynamicSize, sizeRegMask);
- }
-
buildInternalRegisterUses();
regMaskTP killMask = getKillSetForBlockStore(blkNode);
BuildDefsWithKills(blkNode, 0, RBM_NONE, killMask);
diff --git a/src/coreclr/jit/lsrariscv64.cpp b/src/coreclr/jit/lsrariscv64.cpp
index c6e148d53d03..0a38d27574a1 100644
--- a/src/coreclr/jit/lsrariscv64.cpp
+++ b/src/coreclr/jit/lsrariscv64.cpp
@@ -512,7 +512,6 @@ int LinearScan::BuildNode(GenTree* tree)
break;
case GT_STORE_BLK:
- case GT_STORE_DYN_BLK:
srcCount = BuildBlockStore(tree->AsBlk());
break;
@@ -1313,7 +1312,7 @@ int LinearScan::BuildBlockStore(GenTreeBlk* blkNode)
}
}
- if (!blkNode->OperIs(GT_STORE_DYN_BLK) && (sizeRegMask != RBM_NONE))
+ if (sizeRegMask != RBM_NONE)
{
// Reserve a temp register for the block size argument.
buildInternalIntRegisterDefForNode(blkNode, sizeRegMask);
@@ -1344,12 +1343,6 @@ int LinearScan::BuildBlockStore(GenTreeBlk* blkNode)
}
}
- if (blkNode->OperIs(GT_STORE_DYN_BLK))
- {
- useCount++;
- BuildUse(blkNode->AsStoreDynBlk()->gtDynamicSize, sizeRegMask);
- }
-
buildInternalRegisterUses();
regMaskTP killMask = getKillSetForBlockStore(blkNode);
BuildDefsWithKills(blkNode, 0, RBM_NONE, killMask);
diff --git a/src/coreclr/jit/lsraxarch.cpp b/src/coreclr/jit/lsraxarch.cpp
index 41121ee9bed2..a01c6ce5df05 100644
--- a/src/coreclr/jit/lsraxarch.cpp
+++ b/src/coreclr/jit/lsraxarch.cpp
@@ -507,7 +507,6 @@ int LinearScan::BuildNode(GenTree* tree)
#endif // FEATURE_PUT_STRUCT_ARG_STK
case GT_STORE_BLK:
- case GT_STORE_DYN_BLK:
srcCount = BuildBlockStore(tree->AsBlk());
break;
@@ -633,6 +632,20 @@ int LinearScan::BuildNode(GenTree* tree)
}
break;
+#ifdef SWIFT_SUPPORT
+ case GT_SWIFT_ERROR:
+ srcCount = 0;
+ assert(dstCount == 1);
+
+ // Any register should do here, but the error register value should immediately
+ // be moved from GT_SWIFT_ERROR's destination register to the SwiftError struct,
+ // and we know REG_SWIFT_ERROR should be busy up to this point, anyway.
+ // By forcing LSRA to use REG_SWIFT_ERROR as both the source and destination register,
+ // we can ensure the redundant move is elided.
+ BuildDef(tree, RBM_SWIFT_ERROR);
+ break;
+#endif // SWIFT_SUPPORT
+
} // end switch (tree->OperGet())
// We need to be sure that we've set srcCount and dstCount appropriately.
@@ -1357,6 +1370,30 @@ int LinearScan::BuildCall(GenTreeCall* call)
regMaskTP killMask = getKillSetForCall(call);
BuildDefsWithKills(call, dstCount, dstCandidates, killMask);
+#ifdef SWIFT_SUPPORT
+ if ((call->gtCallMoreFlags & GTF_CALL_M_SWIFT_ERROR_HANDLING) != 0)
+ {
+ // Tree is a Swift call with error handling; error register should have been killed
+ assert(call->unmgdCallConv == CorInfoCallConvExtension::Swift);
+ assert((killMask & RBM_SWIFT_ERROR) != 0);
+
+ // After a potentially-throwing Swift call returns, we expect the error register to be consumed
+ // by a GT_SWIFT_ERROR node. However, we want to ensure the error register won't be trashed
+ // before GT_SWIFT_ERROR can consume it.
+ // (For example, the PInvoke epilog comes before the error register store.)
+ // To do so, delay the freeing of the error register until the next node.
+ // This only works if the next node after the call is the GT_SWIFT_ERROR node.
+ // (InsertPInvokeCallEpilog should have moved the GT_SWIFT_ERROR node during lowering.)
+ assert(call->gtNext != nullptr);
+ assert(call->gtNext->OperIs(GT_SWIFT_ERROR));
+
+ // We could use RefTypeKill, but RefTypeFixedReg is used less commonly, so the check for delayRegFree
+ // during register allocation should be cheaper in terms of TP.
+ RefPosition* pos = newRefPosition(REG_SWIFT_ERROR, currentLoc, RefTypeFixedReg, call, RBM_SWIFT_ERROR);
+ setDelayFree(pos);
+ }
+#endif // SWIFT_SUPPORT
+
// No args are placed in registers anymore.
placedArgRegs = RBM_NONE;
numPlacedArgLocals = 0;
@@ -1566,7 +1603,7 @@ int LinearScan::BuildBlockStore(GenTreeBlk* blkNode)
}
}
- if (!blkNode->OperIs(GT_STORE_DYN_BLK) && (sizeRegMask != RBM_NONE))
+ if (sizeRegMask != RBM_NONE)
{
// Reserve a temp register for the block size argument.
buildInternalIntRegisterDefForNode(blkNode, sizeRegMask);
@@ -1597,12 +1634,6 @@ int LinearScan::BuildBlockStore(GenTreeBlk* blkNode)
}
}
- if (blkNode->OperIs(GT_STORE_DYN_BLK))
- {
- useCount++;
- BuildUse(blkNode->AsStoreDynBlk()->gtDynamicSize, sizeRegMask);
- }
-
#ifdef TARGET_X86
// If we require a byte register on x86, we may run into an over-constrained situation
// if we have BYTE_REG_COUNT or more uses (currently, it can be at most 4, if both the
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp
index 9758a43b5f17..f0af492d6197 100644
--- a/src/coreclr/jit/morph.cpp
+++ b/src/coreclr/jit/morph.cpp
@@ -765,6 +765,8 @@ const char* getWellKnownArgName(WellKnownArg arg)
return "ValidateIndirectCallTarget";
case WellKnownArg::DispatchIndirectCallTarget:
return "DispatchIndirectCallTarget";
+ case WellKnownArg::SwiftSelf:
+ return "SwiftSelf";
}
return "N/A";
@@ -2043,17 +2045,23 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call
PushBack(comp, NewCallArg::Primitive(newArg).WellKnown(WellKnownArg::WrapperDelegateCell));
}
#endif // defined(TARGET_ARM)
-#ifndef TARGET_X86
+
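+ // On non-x86 targets, the stub cell arg is always added for virtual stub
+ // calls; on x86 it is only needed for non-indirect calls under the
+ // NativeAOT ABI.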
+ bool addStubCellArg = true;
+
+#ifdef TARGET_X86
// TODO-X86-CQ: Currently RyuJIT/x86 passes args on the stack, so this is not needed.
// If/when we change that, the following code needs to be changed to correctly support the (TBD) managed calling
// convention for x86/SSE.
+ addStubCellArg = call->gtCallType != CT_INDIRECT && comp->IsTargetAbi(CORINFO_NATIVEAOT_ABI);
+#endif
+
// We are allowed to have a ret buffer argument combined
// with any of the remaining non-standard arguments
//
CLANG_FORMAT_COMMENT_ANCHOR;
- if (call->IsVirtualStub())
+ if (call->IsVirtualStub() && addStubCellArg)
{
if (!call->IsTailCallViaJitHelper())
{
@@ -2070,9 +2078,7 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call
// add as a non-standard arg.
}
}
- else
-#endif // !TARGET_X86
- if (call->gtCallType == CT_INDIRECT && (call->gtCallCookie != nullptr))
+ else if (call->gtCallType == CT_INDIRECT && (call->gtCallCookie != nullptr))
{
assert(!call->IsUnmanaged());
@@ -3175,7 +3181,9 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
}
// TODO-ARGS: Review this, is it really necessary to treat them specially here?
- if (call->gtArgs.IsNonStandard(this, call, &arg) && arg.AbiInfo.IsPassedInRegisters())
+ // Exception: Lower SwiftSelf struct arg to GT_LCL_FLD
+ if (call->gtArgs.IsNonStandard(this, call, &arg) && arg.AbiInfo.IsPassedInRegisters() &&
+ (arg.GetWellKnownArg() != WellKnownArg::SwiftSelf))
{
flagsSummary |= argx->gtFlags;
continue;
@@ -6094,19 +6102,11 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call)
// If this block has a flow successor, make suitable updates.
//
- BasicBlock* nextBlock = compCurBB->GetUniqueSucc();
-
- if (nextBlock == nullptr)
+ if (compCurBB->KindIs(BBJ_ALWAYS))
{
- // No unique successor. compCurBB should be a return.
+ // Flow no longer reaches the target from here.
//
- assert(compCurBB->KindIs(BBJ_RETURN));
- }
- else
- {
- // Flow no longer reaches nextBlock from here.
- //
- fgRemoveRefPred(nextBlock, compCurBB);
+ fgRemoveRefPred(compCurBB->GetTargetEdge());
// Adjust profile weights of the successor blocks.
//
@@ -6116,7 +6116,8 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call)
BasicBlock* curBlock = compCurBB;
if (curBlock->hasProfileWeight())
{
- weight_t weightLoss = curBlock->bbWeight;
+ weight_t weightLoss = curBlock->bbWeight;
+ BasicBlock* nextBlock = curBlock->GetTarget();
while (nextBlock->hasProfileWeight())
{
@@ -6145,15 +6146,22 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call)
nextBlock->bbNum, nextWeight, compCurBB->bbNum, weightLoss);
}
- curBlock = nextBlock;
- nextBlock = curBlock->GetUniqueSucc();
- if (nextBlock == nullptr)
+ if (!nextBlock->KindIs(BBJ_ALWAYS))
{
break;
}
+
+ curBlock = nextBlock;
+ nextBlock = curBlock->GetTarget();
}
}
}
+ else
+ {
+ // No unique successor. compCurBB should be a return.
+ //
+ assert(compCurBB->KindIs(BBJ_RETURN));
+ }
#if !FEATURE_TAILCALL_OPT_SHARED_RETURN
// We enable shared-ret tail call optimization for recursive calls even if
@@ -6316,7 +6324,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call)
{
// We call CORINFO_HELP_TAILCALL which does not return, so we will
// not need epilogue.
- compCurBB->SetKindAndTarget(BBJ_THROW);
+ compCurBB->SetKindAndTargetEdge(BBJ_THROW);
}
if (isRootReplaced)
@@ -7463,7 +7471,8 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa
{
// Todo: this may not look like a viable loop header.
// Might need the moral equivalent of a scratch BB.
- block->SetKindAndTarget(BBJ_ALWAYS, fgEntryBB);
+ FlowEdge* const newEdge = fgAddRefPred(fgEntryBB, block);
+ block->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
}
else
{
@@ -7478,11 +7487,11 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa
// block removal on it.
//
fgFirstBB->SetFlags(BBF_DONT_REMOVE);
- block->SetKindAndTarget(BBJ_ALWAYS, fgFirstBB->Next());
+ FlowEdge* const newEdge = fgAddRefPred(fgFirstBB->Next(), block);
+ block->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
}
// Finish hooking things up.
- fgAddRefPred(block->GetTarget(), block);
block->RemoveFlags(BBF_HAS_JMP);
}
@@ -9215,11 +9224,11 @@ DONE_MORPHING_CHILDREN:
// TODO #4104: there are a lot of other places where
// this condition is not checked before transformations.
- if (fgGlobalMorph)
+ noway_assert(op2);
+ if (fgGlobalMorph && !op2->TypeIs(TYP_BYREF))
{
/* Check for "op1 - cns2" , we change it to "op1 + (-cns2)" */
- noway_assert(op2);
if (op2->IsCnsIntOrI() && !op2->IsIconHandle())
{
// Negate the constant and change the node to be "+",
@@ -9237,7 +9246,7 @@ DONE_MORPHING_CHILDREN:
noway_assert(op1);
if (op1->IsCnsIntOrI())
{
- noway_assert(varTypeIsIntOrI(tree));
+ noway_assert(varTypeIsIntegralOrI(tree));
// The type of the new GT_NEG node cannot just be op2->TypeGet().
// Otherwise we may sign-extend incorrectly in cases where the GT_NEG
@@ -12785,10 +12794,6 @@ GenTree* Compiler::fgMorphTree(GenTree* tree, MorphAddrContext* mac)
gtUpdateNodeSideEffects(tree);
break;
- case GT_STORE_DYN_BLK:
- tree = fgMorphStoreDynBlock(tree->AsStoreDynBlk());
- break;
-
case GT_SELECT:
tree->AsConditional()->gtCond = fgMorphTree(tree->AsConditional()->gtCond);
tree->AsConditional()->gtOp1 = fgMorphTree(tree->AsConditional()->gtOp1);
@@ -12992,7 +12997,7 @@ void Compiler::fgAssertionGen(GenTree* tree)
AssertionIndex ifFalseAssertionIndex;
AssertionIndex ifTrueAssertionIndex;
- if (info.IsNextEdgeAssertion())
+ if (info.AssertionHoldsOnFalseEdge())
{
ifFalseAssertionIndex = info.GetAssertionIndex();
ifTrueAssertionIndex = optFindComplementary(ifFalseAssertionIndex);
@@ -13192,12 +13197,18 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
//
BasicBlock* bTaken;
BasicBlock* bNotTaken;
+ FlowEdge* edgeTaken;
if (cond->AsIntCon()->gtIconVal != 0)
{
// JTRUE 1 - transform the basic block into a BBJ_ALWAYS
bTaken = block->GetTrueTarget();
bNotTaken = block->GetFalseTarget();
+
+ // Remove 'block' from the predecessor list of 'bNotTaken'
+ fgRemoveRefPred(block->GetFalseEdge());
+
+ edgeTaken = block->GetTrueEdge();
block->SetKind(BBJ_ALWAYS);
}
else
@@ -13205,7 +13216,12 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
// JTRUE 0 - transform the basic block into a BBJ_ALWAYS
bTaken = block->GetFalseTarget();
bNotTaken = block->GetTrueTarget();
- block->SetKindAndTarget(BBJ_ALWAYS, bTaken);
+
+ // Remove 'block' from the predecessor list of 'bNotTaken'
+ fgRemoveRefPred(block->GetTrueEdge());
+
+ edgeTaken = block->GetFalseEdge();
+ block->SetKindAndTargetEdge(BBJ_ALWAYS, block->GetFalseEdge());
block->SetFlags(BBF_NONE_QUIRK);
}
@@ -13215,8 +13231,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
// and we have already computed the edge weights, so
// we will try to adjust some of the weights
//
- FlowEdge* edgeTaken = fgGetPredForBlock(bTaken, block);
- BasicBlock* bUpdated = nullptr; // non-NULL if we updated the weight of an internal block
+ BasicBlock* bUpdated = nullptr; // non-NULL if we updated the weight of an internal block
// We examine the taken edge (block -> bTaken)
// if block has valid profile weight and bTaken does not we try to adjust bTaken's weight
@@ -13262,19 +13277,19 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
switch (bUpdated->GetKind())
{
case BBJ_COND:
- edge = fgGetPredForBlock(bUpdated->GetFalseTarget(), bUpdated);
+ edge = bUpdated->GetFalseEdge();
newMaxWeight = bUpdated->bbWeight;
newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->GetFalseTarget());
- edge = fgGetPredForBlock(bUpdated->GetTrueTarget(), bUpdated);
+ edge = bUpdated->GetTrueEdge();
newMaxWeight = bUpdated->bbWeight;
newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->GetTrueTarget());
break;
case BBJ_ALWAYS:
- edge = fgGetPredForBlock(bUpdated->GetTarget(), bUpdated);
+ edge = bUpdated->GetTargetEdge();
newMaxWeight = bUpdated->bbWeight;
newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->Next());
@@ -13287,11 +13302,6 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
}
}
- /* modify the flow graph */
-
- /* Remove 'block' from the predecessor list of 'bNotTaken' */
- fgRemoveRefPred(bNotTaken, block);
-
#ifdef DEBUG
if (verbose)
{
@@ -13373,13 +13383,13 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1)))
{
- block->SetKindAndTarget(BBJ_ALWAYS, curEdge->getDestinationBlock());
+ block->SetKindAndTargetEdge(BBJ_ALWAYS, curEdge);
foundVal = true;
}
else
{
// Remove 'curEdge'
- fgRemoveRefPred(curEdge->getDestinationBlock(), block);
+ fgRemoveRefPred(curEdge);
}
}
@@ -13561,11 +13571,7 @@ void Compiler::fgMorphStmtBlockOps(BasicBlock* block, Statement* stmt)
{
if ((*use)->OperIsBlkOp())
{
- if ((*use)->OperIs(GT_STORE_DYN_BLK))
- {
- *use = m_compiler->fgMorphStoreDynBlock((*use)->AsStoreDynBlk());
- }
- else if ((*use)->OperIsInitBlkOp())
+ if ((*use)->OperIsInitBlkOp())
{
*use = m_compiler->fgMorphInitBlock(*use);
}
@@ -14129,8 +14135,8 @@ void Compiler::fgMergeBlockReturn(BasicBlock* block)
else
#endif // !TARGET_X86
{
- block->SetKindAndTarget(BBJ_ALWAYS, genReturnBB);
- fgAddRefPred(genReturnBB, block);
+ FlowEdge* const newEdge = fgAddRefPred(genReturnBB, block);
+ block->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
fgReturnCount--;
}
@@ -14608,7 +14614,7 @@ bool Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt)
// Conservatively propagate BBF_COPY_PROPAGATE flags to all blocks
BasicBlockFlags propagateFlagsToAll = block->GetFlagsRaw() & BBF_COPY_PROPAGATE;
BasicBlock* remainderBlock = fgSplitBlockAfterStatement(block, stmt);
- fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock.
+ fgRemoveRefPred(block->GetTargetEdge()); // We're going to put more blocks between block and remainderBlock.
BasicBlock* condBlock = fgNewBBafter(BBJ_ALWAYS, block, true);
BasicBlock* elseBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true);
@@ -14632,16 +14638,24 @@ bool Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt)
assert(condBlock->bbWeight == remainderBlock->bbWeight);
assert(block->KindIs(BBJ_ALWAYS));
- block->SetTarget(condBlock);
- condBlock->SetTarget(elseBlock);
- elseBlock->SetTarget(remainderBlock);
+ {
+ FlowEdge* const newEdge = fgAddRefPred(condBlock, block);
+ block->SetTargetEdge(newEdge);
+ }
+
+ {
+ FlowEdge* const newEdge = fgAddRefPred(elseBlock, condBlock);
+ condBlock->SetTargetEdge(newEdge);
+ }
+
+ {
+ FlowEdge* const newEdge = fgAddRefPred(remainderBlock, elseBlock);
+ elseBlock->SetTargetEdge(newEdge);
+ }
+
assert(condBlock->JumpsToNext());
assert(elseBlock->JumpsToNext());
- fgAddRefPred(condBlock, block);
- fgAddRefPred(elseBlock, condBlock);
- fgAddRefPred(remainderBlock, elseBlock);
-
condBlock->SetFlags(propagateFlagsToAll | BBF_NONE_QUIRK);
elseBlock->SetFlags(propagateFlagsToAll | BBF_NONE_QUIRK);
@@ -14658,17 +14672,20 @@ bool Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt)
//
gtReverseCond(condExpr);
- thenBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true, remainderBlock);
+ thenBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true);
thenBlock->SetFlags(propagateFlagsToAll);
- condBlock->SetCond(elseBlock, thenBlock);
if (!block->HasFlag(BBF_INTERNAL))
{
thenBlock->RemoveFlags(BBF_INTERNAL);
thenBlock->SetFlags(BBF_IMPORTED);
}
- fgAddRefPred(thenBlock, condBlock);
- fgAddRefPred(remainderBlock, thenBlock);
+ FlowEdge* const newEdge = fgAddRefPred(remainderBlock, thenBlock);
+ thenBlock->SetTargetEdge(newEdge);
+
+ assert(condBlock->TargetIs(elseBlock));
+ FlowEdge* const falseEdge = fgAddRefPred(thenBlock, condBlock);
+ condBlock->SetCond(condBlock->GetTargetEdge(), falseEdge);
thenBlock->inheritWeightPercentage(condBlock, qmark->ThenNodeLikelihood());
elseBlock->inheritWeightPercentage(condBlock, qmark->ElseNodeLikelihood());
@@ -14682,8 +14699,11 @@ bool Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt)
// bbj_cond(true)
//
gtReverseCond(condExpr);
- condBlock->SetCond(remainderBlock, elseBlock);
- fgAddRefPred(remainderBlock, condBlock);
+
+ assert(condBlock->TargetIs(elseBlock));
+ FlowEdge* const trueEdge = fgAddRefPred(remainderBlock, condBlock);
+ condBlock->SetCond(trueEdge, condBlock->GetTargetEdge());
+
// Since we have no false expr, use the one we'd already created.
thenBlock = elseBlock;
elseBlock = nullptr;
@@ -14698,8 +14718,9 @@ bool Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt)
// +-->------------+
// bbj_cond(true)
//
- condBlock->SetCond(remainderBlock, elseBlock);
- fgAddRefPred(remainderBlock, condBlock);
+ assert(condBlock->TargetIs(elseBlock));
+ FlowEdge* const trueEdge = fgAddRefPred(remainderBlock, condBlock);
+ condBlock->SetCond(trueEdge, condBlock->GetTargetEdge());
elseBlock->inheritWeightPercentage(condBlock, qmark->ElseNodeLikelihood());
}
diff --git a/src/coreclr/jit/morphblock.cpp b/src/coreclr/jit/morphblock.cpp
index 94d10dd5887f..3f4a20215ead 100644
--- a/src/coreclr/jit/morphblock.cpp
+++ b/src/coreclr/jit/morphblock.cpp
@@ -1512,70 +1512,3 @@ GenTree* Compiler::fgMorphInitBlock(GenTree* tree)
{
return MorphInitBlockHelper::MorphInitBlock(this, tree);
}
-
-//------------------------------------------------------------------------
-// fgMorphStoreDynBlock: Morph a dynamic block store (GT_STORE_DYN_BLK).
-//
-// Performs full (pre-order and post-order) morphing for a STORE_DYN_BLK.
-//
-// Arguments:
-// tree - The GT_STORE_DYN_BLK tree to morph.
-//
-// Return Value:
-// In case the size turns into a constant - the store, transformed
-// into an "ordinary" STORE_BLK<size> one, and further morphed by
-// "fgMorphInitBlock"/"fgMorphCopyBlock". Otherwise, the original
-// tree (fully morphed).
-//
-GenTree* Compiler::fgMorphStoreDynBlock(GenTreeStoreDynBlk* tree)
-{
- if (!tree->Data()->OperIs(GT_CNS_INT, GT_INIT_VAL))
- {
- // Data is a location and required to have GTF_DONT_CSE.
- tree->Data()->gtFlags |= GTF_DONT_CSE;
- }
-
- tree->Addr() = fgMorphTree(tree->Addr());
- tree->Data() = fgMorphTree(tree->Data());
- tree->gtDynamicSize = fgMorphTree(tree->gtDynamicSize);
-
- if (tree->gtDynamicSize->IsIntegralConst())
- {
- int64_t size = tree->gtDynamicSize->AsIntConCommon()->IntegralValue();
-
- if ((size != 0) && FitsIn<int32_t>(size))
- {
- ClassLayout* layout = typGetBlkLayout(static_cast<unsigned>(size));
- GenTree* src = tree->Data();
- if (src->OperIs(GT_IND))
- {
- assert(src->TypeIs(TYP_STRUCT));
- src->SetOper(GT_BLK);
- src->AsBlk()->Initialize(layout);
- }
-
- GenTree* store = gtNewStoreValueNode(layout, tree->Addr(), src, tree->gtFlags & GTF_IND_FLAGS);
- store->AddAllEffectsFlags(tree);
- INDEBUG(store->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
-
- JITDUMP("MorphStoreDynBlock: transformed STORE_DYN_BLK into STORE_BLK\n");
-
- return tree->OperIsCopyBlkOp() ? fgMorphCopyBlock(store) : fgMorphInitBlock(store);
- }
- }
-
- tree->SetAllEffectsFlags(tree->Addr(), tree->Data(), tree->gtDynamicSize);
-
- if (tree->OperMayThrow(this))
- {
- tree->gtFlags |= GTF_EXCEPT;
- }
- else
- {
- tree->gtFlags |= GTF_IND_NONFAULTING;
- }
-
- tree->gtFlags |= GTF_ASG;
-
- return tree;
-}
diff --git a/src/coreclr/jit/namedintrinsiclist.h b/src/coreclr/jit/namedintrinsiclist.h
index 09d33ba76a0d..9fa128c38f74 100644
--- a/src/coreclr/jit/namedintrinsiclist.h
+++ b/src/coreclr/jit/namedintrinsiclist.h
@@ -20,7 +20,7 @@ enum NamedIntrinsic : unsigned short
NI_System_BitConverter_Int64BitsToDouble,
NI_System_BitConverter_SingleToInt32Bits,
- NI_System_Buffer_Memmove,
+ NI_System_SpanHelpers_Memmove,
NI_SYSTEM_MATH_START,
NI_System_Math_Abs,
@@ -118,6 +118,8 @@ enum NamedIntrinsic : unsigned short
NI_System_String_EndsWith,
NI_System_Span_get_Item,
NI_System_Span_get_Length,
+ NI_System_SpanHelpers_ClearWithoutReferences,
+ NI_System_SpanHelpers_Fill,
NI_System_SpanHelpers_SequenceEqual,
NI_System_ReadOnlySpan_get_Item,
NI_System_ReadOnlySpan_get_Length,
diff --git a/src/coreclr/jit/optcse.cpp b/src/coreclr/jit/optcse.cpp
index 0166d4cd4743..cd554f02e563 100644
--- a/src/coreclr/jit/optcse.cpp
+++ b/src/coreclr/jit/optcse.cpp
@@ -164,6 +164,11 @@ bool Compiler::optUnmarkCSE(GenTree* tree)
// 2. Unmark the CSE information in the node
tree->gtCSEnum = NO_CSE;
+
+ // 3. Leave breadcrumbs so we know some dsc was altered
+
+ optCSEunmarks++;
+
return true;
}
else
@@ -2436,10 +2441,12 @@ void CSE_HeuristicParameterized::GreedyPolicy()
//
const int numCandidates = m_pCompiler->optCSECandidateCount;
ArrayStack<Choice> choices(m_pCompiler->getAllocator(CMK_CSE), numCandidates + 1);
+ unsigned numUnmarked = m_pCompiler->optCSEunmarks;
+ bool recomputeFeatures = true;
while (true)
{
- Choice& choice = ChooseGreedy(choices);
+ Choice& choice = ChooseGreedy(choices, recomputeFeatures);
CSEdsc* const dsc = choice.m_dsc;
#ifdef DEBUG
@@ -2472,7 +2479,16 @@ void CSE_HeuristicParameterized::GreedyPolicy()
JITDUMP("\n");
PerformCSE(&candidate);
- madeChanges = true;
+ madeChanges = true;
+ choice.m_performed = true;
+
+ // If performing this CSE impacted other CSEs, we need to
+ // recompute all cse features.
+ //
+ unsigned newNumUnmarked = m_pCompiler->optCSEunmarks;
+ assert(newNumUnmarked >= numUnmarked);
+ recomputeFeatures = (numUnmarked != newNumUnmarked);
+ numUnmarked = newNumUnmarked;
}
return;
@@ -2575,7 +2591,7 @@ void CSE_HeuristicParameterized::GetFeatures(CSEdsc* cse, double* features)
unsigned maxPostorderNum = 0;
BasicBlock* minPostorderBlock = nullptr;
BasicBlock* maxPostorderBlock = nullptr;
- for (treeStmtLst* treeList = cse->csdTreeList; treeList != nullptr && !isMakeCse; treeList = treeList->tslNext)
+ for (treeStmtLst* treeList = cse->csdTreeList; treeList != nullptr; treeList = treeList->tslNext)
{
BasicBlock* const treeBlock = treeList->tslBlock;
unsigned postorderNum = treeBlock->bbPostorderNum;
@@ -2616,7 +2632,6 @@ void CSE_HeuristicParameterized::GetFeatures(CSEdsc* cse, double* features)
// LSRA "is live across call"
//
bool isLiveAcrossCallLSRA = isLiveAcrossCall;
-
if (!isLiveAcrossCallLSRA)
{
unsigned count = 0;
@@ -2630,7 +2645,6 @@ void CSE_HeuristicParameterized::GetFeatures(CSEdsc* cse, double* features)
}
}
}
-
features[23] = booleanScale * isLiveAcrossCallLSRA;
}
@@ -2748,6 +2762,10 @@ double CSE_HeuristicParameterized::StoppingPreference()
// ChooseGreedy: examine candidates and choose the next CSE to perform
// via greedy policy
//
+// Arguments:
+// choices -- array of choices, possibly already filled in
+// recompute -- if true, rebuild the choice array from scratch
+//
// Returns:
// Choice of CSE to perform
//
@@ -2755,10 +2773,25 @@ double CSE_HeuristicParameterized::StoppingPreference()
// Picks the most-preferred candidate.
// If there is a tie, picks stop, or the lowest cse index.
//
-CSE_HeuristicParameterized::Choice& CSE_HeuristicParameterized::ChooseGreedy(ArrayStack<Choice>& choices)
+CSE_HeuristicParameterized::Choice& CSE_HeuristicParameterized::ChooseGreedy(ArrayStack<Choice>& choices,
+ bool recompute)
{
- choices.Reset();
- BuildChoices(choices);
+ if (recompute)
+ {
+ choices.Reset();
+ BuildChoices(choices);
+ }
+ else
+ {
+ // Always recompute the stopping preference as this
+ // reflects ambient state after each CSE.
+ //
+ // By convention, this is at TopRef(0).
+ //
+ Choice& stopping = choices.TopRef(0);
+ assert(stopping.m_dsc == nullptr);
+ stopping.m_preference = StoppingPreference();
+ }
// Find the maximally preferred case.
//
@@ -2766,8 +2799,14 @@ CSE_HeuristicParameterized::Choice& CSE_HeuristicParameterized::ChooseGreedy(Arr
for (int i = 1; i < choices.Height(); i++)
{
- Choice& choice = choices.TopRef(i);
- Choice& bestChoice = choices.TopRef(choiceNum);
+ const Choice& choice = choices.TopRef(i);
+
+ if (choice.m_performed == true)
+ {
+ continue;
+ }
+
+ const Choice& bestChoice = choices.TopRef(choiceNum);
const double delta = choice.m_preference - bestChoice.m_preference;
@@ -2811,6 +2850,8 @@ CSE_HeuristicParameterized::Choice& CSE_HeuristicParameterized::ChooseGreedy(Arr
//
void CSE_HeuristicParameterized::BuildChoices(ArrayStack<Choice>& choices)
{
+ JITDUMP("Building choice array...\n");
+
for (unsigned i = 0; i < m_pCompiler->optCSECandidateCount; i++)
{
CSEdsc* const dsc = sortTab[i];
@@ -2893,9 +2934,15 @@ void CSE_HeuristicParameterized::DumpChoices(ArrayStack<Choice>& choices, int hi
{
for (int i = 0; i < choices.Height(); i++)
{
- Choice& choice = choices.TopRef(i);
- CSEdsc* const cse = choice.m_dsc;
- const char* msg = i == highlight ? "=>" : " ";
+ const Choice& choice = choices.TopRef(i);
+
+ if (choice.m_performed == true)
+ {
+ continue;
+ }
+
+ CSEdsc* const cse = choice.m_dsc;
+ const char* msg = (i == highlight) ? "=>" : " ";
if (cse != nullptr)
{
printf("%s%2d: " FMT_CSE " preference %10.7f likelihood %10.7f\n", msg, i, cse->csdIndex,
@@ -2920,9 +2967,15 @@ void CSE_HeuristicParameterized::DumpChoices(ArrayStack<Choice>& choices, CSEdsc
{
for (int i = 0; i < choices.Height(); i++)
{
- Choice& choice = choices.TopRef(i);
- CSEdsc* const cse = choice.m_dsc;
- const char* msg = cse == highlight ? "=>" : " ";
+ const Choice& choice = choices.TopRef(i);
+
+ if (choice.m_performed == true)
+ {
+ continue;
+ }
+
+ CSEdsc* const cse = choice.m_dsc;
+ const char* msg = (cse == highlight) ? "=>" : " ";
if (cse != nullptr)
{
printf("%s%2d: " FMT_CSE " preference %10.7f likelihood %10.7f\n", msg, i, cse->csdIndex,
@@ -4422,50 +4475,62 @@ bool CSE_HeuristicCommon::IsCompatibleType(var_types cseLclVarTyp, var_types exp
return false;
}
-// PerformCSE() takes a successful candidate and performs the appropriate replacements:
+//------------------------------------------------------------------------
+// PerformCSE: takes a successful candidate and performs the appropriate replacements
+//
+// Arguments:
+// successfulCandidate - cse candidate to perform
//
// It will replace all of the CSE defs with assignments to a new "cse0" LclVar
// and will replace all of the CSE uses with reads of the "cse0" LclVar
//
// It will also put cse0 into SSA if there is just one def.
+//
void CSE_HeuristicCommon::PerformCSE(CSE_Candidate* successfulCandidate)
{
AdjustHeuristic(successfulCandidate);
+ CSEdsc* const dsc = successfulCandidate->CseDsc();
#ifdef DEBUG
// Setup the message arg for lvaGrabTemp()
//
- const char* grabTempMessage = "CSE - unknown";
+ const char* heuristicTempMessage = "";
if (successfulCandidate->IsAggressive())
{
- grabTempMessage = "CSE - aggressive";
+ heuristicTempMessage = ": aggressive";
}
else if (successfulCandidate->IsModerate())
{
- grabTempMessage = "CSE - moderate";
+ heuristicTempMessage = ": moderate";
}
else if (successfulCandidate->IsConservative())
{
- grabTempMessage = "CSE - conservative";
+ heuristicTempMessage = ": conservative";
}
else if (successfulCandidate->IsStressCSE())
{
- grabTempMessage = "CSE - stress mode";
+ heuristicTempMessage = ": stress";
}
else if (successfulCandidate->IsRandom())
{
- grabTempMessage = "CSE - random";
+ heuristicTempMessage = ": random";
}
-#endif // DEBUG
- /* Introduce a new temp for the CSE */
+ const char* const grabTempMessage = m_pCompiler->printfAlloc(FMT_CSE "%s", dsc->csdIndex, heuristicTempMessage);
+
+ // Add this candidate to the CSE sequence
+ //
+ m_sequence->push_back(dsc->csdIndex);
+
+#endif // DEBUG
- // we will create a long lifetime temp for the new CSE LclVar
+ // Allocate a CSE temp
+ //
unsigned cseLclVarNum = m_pCompiler->lvaGrabTemp(false DEBUGARG(grabTempMessage));
var_types cseLclVarTyp = genActualType(successfulCandidate->Expr()->TypeGet());
- LclVarDsc* lclDsc = m_pCompiler->lvaGetDesc(cseLclVarNum);
+ LclVarDsc* const lclDsc = m_pCompiler->lvaGetDesc(cseLclVarNum);
if (cseLclVarTyp == TYP_STRUCT)
{
m_pCompiler->lvaSetStruct(cseLclVarNum, successfulCandidate->Expr()->GetLayout(m_pCompiler), false);
@@ -4474,6 +4539,7 @@ void CSE_HeuristicCommon::PerformCSE(CSE_Candidate* successfulCandidate)
lclDsc->lvIsCSE = true;
// Record that we created a new LclVar for use as a CSE temp
+ //
m_addCSEcount++;
m_pCompiler->optCSEcount++;
m_pCompiler->Metrics.CseCount++;
@@ -4484,11 +4550,9 @@ void CSE_HeuristicCommon::PerformCSE(CSE_Candidate* successfulCandidate)
//
// Later we will unmark any nested CSE's for the CSE uses.
//
- CSEdsc* dsc = successfulCandidate->CseDsc();
- INDEBUG(m_sequence->push_back(dsc->csdIndex));
-
// If there's just a single def for the CSE, we'll put this
// CSE into SSA form on the fly. We won't need any PHIs.
+ //
unsigned cseSsaNum = SsaConfig::RESERVED_SSA_NUM;
LclSsaVarDsc* ssaVarDsc = nullptr;
diff --git a/src/coreclr/jit/optcse.h b/src/coreclr/jit/optcse.h
index 3d1c7f0702ba..550f754f6a8b 100644
--- a/src/coreclr/jit/optcse.h
+++ b/src/coreclr/jit/optcse.h
@@ -151,12 +151,14 @@ class CSE_HeuristicParameterized : public CSE_HeuristicCommon
protected:
struct Choice
{
- Choice(CSEdsc* dsc, double preference) : m_dsc(dsc), m_preference(preference), m_softmax(0)
+ Choice(CSEdsc* dsc, double preference) : m_dsc(dsc), m_preference(preference), m_softmax(0), m_performed(false)
{
}
+
CSEdsc* m_dsc;
double m_preference;
double m_softmax;
+ bool m_performed;
};
enum
@@ -185,7 +187,7 @@ public:
double StoppingPreference();
void BuildChoices(ArrayStack<Choice>& choices);
- Choice& ChooseGreedy(ArrayStack<Choice>& choices);
+ Choice& ChooseGreedy(ArrayStack<Choice>& choices, bool recompute);
virtual const char* Name() const
{
diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp
index f9616636681b..5b609c0b4fb0 100644
--- a/src/coreclr/jit/optimizebools.cpp
+++ b/src/coreclr/jit/optimizebools.cpp
@@ -46,7 +46,7 @@ public:
private:
BasicBlock* m_b1; // The first basic block with the BBJ_COND conditional jump type
BasicBlock* m_b2; // The next basic block of m_b1. Either BBJ_COND or BBJ_RETURN type
- BasicBlock* m_b3; // m_b1->bbTarget. Null if m_b2 is not a return block.
+ BasicBlock* m_b3; // m_b1's target block. Null if m_b2 is not a return block.
Compiler* m_comp; // The pointer to the Compiler instance
@@ -89,7 +89,7 @@ private:
// Notes:
// m_b1 and m_b2 are set on entry.
//
-// Case 1: if b1.bbTarget == b2.bbTarget, it transforms
+// Case 1: if b1->TargetIs(b2->GetTarget()), it transforms
// B1 : brtrue(t1, Bx)
// B2 : brtrue(t2, Bx)
// B3 :
@@ -107,7 +107,7 @@ private:
// B3: GT_RETURN (BBJ_RETURN)
// B4: GT_RETURN (BBJ_RETURN)
//
-// Case 2: if B2->FalseTargetIs(B1.bbTarget), it transforms
+// Case 2: if B2->FalseTargetIs(B1->GetTarget()), it transforms
// B1 : brtrue(t1, B3)
// B2 : brtrue(t2, Bx)
// B3 :
@@ -123,7 +123,7 @@ bool OptBoolsDsc::optOptimizeBoolsCondBlock()
m_t3 = nullptr;
- // Check if m_b1 and m_b2 have the same bbTarget
+ // Check if m_b1 and m_b2 have the same target
if (m_b1->TrueTargetIs(m_b2->GetTrueTarget()))
{
@@ -808,19 +808,25 @@ bool OptBoolsDsc::optOptimizeRangeTests()
}
// Re-direct firstBlock to jump to inRangeBb
- m_comp->fgAddRefPred(inRangeBb, m_b1);
+ FlowEdge* const newEdge = m_comp->fgAddRefPred(inRangeBb, m_b1);
+ FlowEdge* const oldEdge = m_b1->GetFalseEdge();
+
if (!cmp2IsReversed)
{
- m_b1->SetTrueTarget(inRangeBb);
- m_b1->SetFalseTarget(notInRangeBb);
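+ // The previous true edge becomes the false edge, and the new edge to
+ // inRangeBb becomes the true edge.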
+ m_b1->SetFalseEdge(m_b1->GetTrueEdge());
+ m_b1->SetTrueEdge(newEdge);
+ assert(m_b1->TrueTargetIs(inRangeBb));
+ assert(m_b1->FalseTargetIs(notInRangeBb));
}
else
{
- m_b1->SetFalseTarget(inRangeBb);
+ m_b1->SetFalseEdge(newEdge);
+ assert(m_b1->TrueTargetIs(notInRangeBb));
+ assert(m_b1->FalseTargetIs(inRangeBb));
}
// Remove the 2nd condition block as we no longer need it
- m_comp->fgRemoveRefPred(m_b2, m_b1);
+ m_comp->fgRemoveRefPred(oldEdge);
m_comp->fgRemoveBlock(m_b2, true);
Statement* stmt = m_b1->lastStmt();
@@ -1012,8 +1018,8 @@ bool OptBoolsDsc::optOptimizeCompareChainCondBlock()
m_comp->fgSetStmtSeq(s2);
// Update the flow.
- m_comp->fgRemoveRefPred(m_b1->GetTrueTarget(), m_b1);
- m_b1->SetKindAndTarget(BBJ_ALWAYS, m_b1->GetFalseTarget());
+ m_comp->fgRemoveRefPred(m_b1->GetTrueEdge());
+ m_b1->SetKindAndTargetEdge(BBJ_ALWAYS, m_b1->GetFalseEdge());
m_b1->SetFlags(BBF_NONE_QUIRK);
// Fixup flags.
@@ -1266,22 +1272,19 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees()
{
// Update edges if m_b1: BBJ_COND and m_b2: BBJ_COND
- FlowEdge* edge1 = m_comp->fgGetPredForBlock(m_b1->GetTrueTarget(), m_b1);
+ FlowEdge* edge1 = m_b1->GetTrueEdge();
FlowEdge* edge2;
if (m_sameTarget)
{
- edge2 = m_comp->fgGetPredForBlock(m_b2->GetTrueTarget(), m_b2);
+ edge2 = m_b2->GetTrueEdge();
}
else
{
- edge2 = m_comp->fgGetPredForBlock(m_b2->GetFalseTarget(), m_b2);
-
- m_comp->fgRemoveRefPred(m_b1->GetTrueTarget(), m_b1);
-
- m_b1->SetTrueTarget(m_b2->GetTrueTarget());
-
- m_comp->fgAddRefPred(m_b2->GetTrueTarget(), m_b1);
+ edge2 = m_b2->GetFalseEdge();
+ m_comp->fgRemoveRefPred(m_b1->GetTrueEdge());
+ FlowEdge* const newEdge = m_comp->fgAddRefPred(m_b2->GetTrueTarget(), m_b1);
+ m_b1->SetTrueEdge(newEdge);
}
assert(edge1 != nullptr);
@@ -1307,7 +1310,7 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees()
assert(m_b2->KindIs(BBJ_RETURN));
assert(m_b1->FalseTargetIs(m_b2));
assert(m_b3 != nullptr);
- m_b1->SetKindAndTarget(BBJ_RETURN);
+ m_b1->SetKindAndTargetEdge(BBJ_RETURN);
}
else
{
@@ -1322,11 +1325,12 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees()
{
// Update bbRefs and bbPreds
//
- // Replace pred 'm_b2' for 'm_b2->bbFalseTarget' with 'm_b1'
- // Remove pred 'm_b2' for 'm_b2->bbTrueTarget'
- m_comp->fgReplacePred(m_b2->GetFalseTarget(), m_b2, m_b1);
- m_comp->fgRemoveRefPred(m_b2->GetTrueTarget(), m_b2);
- m_b1->SetFalseTarget(m_b2->GetFalseTarget());
+ // Replace pred 'm_b2' for m_b2's false target with 'm_b1'
+ // Remove pred 'm_b2' for m_b2's true target
+ FlowEdge* falseEdge = m_b2->GetFalseEdge();
+ m_comp->fgReplacePred(falseEdge, m_b1);
+ m_comp->fgRemoveRefPred(m_b2->GetTrueEdge());
+ m_b1->SetFalseEdge(falseEdge);
}
// Get rid of the second block
@@ -1361,7 +1365,7 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees()
// Notes:
// m_b1, m_b2 and m_b3 of OptBoolsDsc are set on entry.
//
-// if B1.bbTarget == b3, it transforms
+// if B1->TargetIs(b3), it transforms
// B1 : brtrue(t1, B3)
// B2 : ret(t2)
// B3 : ret(0)
diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp
index 169d5f267749..3fea8881bf16 100644
--- a/src/coreclr/jit/optimizer.cpp
+++ b/src/coreclr/jit/optimizer.cpp
@@ -34,6 +34,7 @@ void Compiler::optInit()
optCSECandidateCount = 0;
optCSEattempt = 0;
optCSEheuristic = nullptr;
+ optCSEunmarks = 0;
}
DataFlow::DataFlow(Compiler* pCompiler) : m_pCompiler(pCompiler)
@@ -584,20 +585,24 @@ void Compiler::optSetMappedBlockTargets(BasicBlock* blk, BasicBlock* newBlk, Blo
case BBJ_CALLFINALLY:
case BBJ_CALLFINALLYRET:
case BBJ_LEAVE:
+ {
+ FlowEdge* newEdge;
+
// Determine if newBlk should be redirected to a different target from blk's target
if (redirectMap->Lookup(blk->GetTarget(), &newTarget))
{
// newBlk needs to be redirected to a new target
- newBlk->SetKindAndTarget(blk->GetKind(), newTarget);
+ newEdge = fgAddRefPred(newTarget, newBlk);
}
else
{
// newBlk uses the same target as blk
- newBlk->SetKindAndTarget(blk->GetKind(), blk->GetTarget());
+ newEdge = fgAddRefPred(blk->GetTarget(), newBlk);
}
- fgAddRefPred(newBlk->GetTarget(), newBlk);
+ newBlk->SetKindAndTargetEdge(blk->GetKind(), newEdge);
break;
+ }
case BBJ_COND:
{
@@ -626,9 +631,9 @@ void Compiler::optSetMappedBlockTargets(BasicBlock* blk, BasicBlock* newBlk, Blo
falseTarget = blk->GetFalseTarget();
}
- fgAddRefPred(trueTarget, newBlk);
- fgAddRefPred(falseTarget, newBlk);
- newBlk->SetCond(trueTarget, falseTarget);
+ FlowEdge* const trueEdge = fgAddRefPred(trueTarget, newBlk);
+ FlowEdge* const falseEdge = fgAddRefPred(falseTarget, newBlk);
+ newBlk->SetCond(trueEdge, falseEdge);
break;
}
@@ -695,16 +700,18 @@ void Compiler::optSetMappedBlockTargets(BasicBlock* blk, BasicBlock* newBlk, Blo
case BBJ_EHCATCHRET:
case BBJ_EHFILTERRET:
+ {
// newBlk's jump target should not need to be redirected
assert(!redirectMap->Lookup(blk->GetTarget(), &newTarget));
- newBlk->SetKindAndTarget(blk->GetKind(), blk->GetTarget());
- fgAddRefPred(newBlk->GetTarget(), newBlk);
+ FlowEdge* newEdge = fgAddRefPred(newBlk->GetTarget(), newBlk);
+ newBlk->SetKindAndTargetEdge(blk->GetKind(), newEdge);
break;
+ }
default:
// blk doesn't have a jump destination
assert(blk->NumSucc() == 0);
- newBlk->SetKindAndTarget(blk->GetKind());
+ newBlk->SetKindAndTargetEdge(blk->GetKind());
break;
}
@@ -1709,12 +1716,12 @@ void Compiler::optRedirectPrevUnrollIteration(FlowGraphNaturalLoop* loop, BasicB
testCopyStmt->SetRootNode(sideEffList);
}
- fgRemoveRefPred(prevTestBlock->GetTrueTarget(), prevTestBlock);
- fgRemoveRefPred(prevTestBlock->GetFalseTarget(), prevTestBlock);
+ fgRemoveRefPred(prevTestBlock->GetTrueEdge());
+ fgRemoveRefPred(prevTestBlock->GetFalseEdge());
// Redirect exit edge from previous iteration to new entry.
- prevTestBlock->SetKindAndTarget(BBJ_ALWAYS, target);
- fgAddRefPred(target, prevTestBlock);
+ FlowEdge* const newEdge = fgAddRefPred(target, prevTestBlock);
+ prevTestBlock->SetKindAndTargetEdge(BBJ_ALWAYS, newEdge);
JITDUMP("Redirecting previously created exiting " FMT_BB " -> " FMT_BB "\n", prevTestBlock->bbNum,
target->bbNum);
@@ -1923,7 +1930,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
return false;
}
- // Since bTest is a BBJ_COND it will have a bbFalseTarget
+ // Since bTest is a BBJ_COND it will have a false target
//
BasicBlock* const bJoin = bTest->GetFalseTarget();
noway_assert(bJoin != nullptr);
@@ -1945,7 +1952,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
}
// It has to be a forward jump. Defer this check until after all the cheap checks
- // are done, since it iterates forward in the block list looking for bbTarget.
+ // are done, since it iterates forward in the block list looking for the block's target.
// TODO-CQ: Check if we can also optimize the backwards jump as well.
//
if (!fgIsForwardBranch(block, block->GetTarget()))
@@ -2135,10 +2142,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
bool foundCondTree = false;
// Create a new block after `block` to put the copied condition code.
- BasicBlock* bNewCond = fgNewBBafter(BBJ_COND, block, /*extendRegion*/ true, bJoin);
- block->SetKindAndTarget(BBJ_ALWAYS, bNewCond);
- block->SetFlags(BBF_NONE_QUIRK);
- assert(block->JumpsToNext());
+ BasicBlock* bNewCond = fgNewBBafter(BBJ_COND, block, /*extendRegion*/ true);
// Clone each statement in bTest and append to bNewCond.
for (Statement* const stmt : bTest->Statements())
@@ -2197,12 +2201,17 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
// Update pred info
//
- bNewCond->SetFalseTarget(bTop);
- fgAddRefPred(bJoin, bNewCond);
- fgAddRefPred(bTop, bNewCond);
+ FlowEdge* const trueEdge = fgAddRefPred(bJoin, bNewCond);
+ FlowEdge* const falseEdge = fgAddRefPred(bTop, bNewCond);
+ bNewCond->SetTrueEdge(trueEdge);
+ bNewCond->SetFalseEdge(falseEdge);
+
+ fgRemoveRefPred(block->GetTargetEdge());
+ FlowEdge* const newEdge = fgAddRefPred(bNewCond, block);
- fgAddRefPred(bNewCond, block);
- fgRemoveRefPred(bTest, block);
+ block->SetTargetEdge(newEdge);
+ block->SetFlags(BBF_NONE_QUIRK);
+ assert(block->JumpsToNext());
// Move all predecessor edges that look like loop entry edges to point to the new cloned condition
// block, not the existing condition block. The idea is that if we only move `block` to point to
@@ -2281,8 +2290,8 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
weight_t const testToNextWeight = weightTop * testToNextLikelihood;
weight_t const testToAfterWeight = weightTop * testToAfterLikelihood;
- FlowEdge* const edgeTestToNext = fgGetPredForBlock(bTop, bTest);
- FlowEdge* const edgeTestToAfter = fgGetPredForBlock(bTest->GetFalseTarget(), bTest);
+ FlowEdge* const edgeTestToNext = bTest->GetTrueEdge();
+ FlowEdge* const edgeTestToAfter = bTest->GetFalseEdge();
JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (iterate loop)\n", bTest->bbNum, bTop->bbNum,
testToNextWeight);
@@ -2302,8 +2311,8 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
weight_t const blockToNextWeight = weightBlock * blockToNextLikelihood;
weight_t const blockToAfterWeight = weightBlock * blockToAfterLikelihood;
- FlowEdge* const edgeBlockToNext = fgGetPredForBlock(bNewCond->GetFalseTarget(), bNewCond);
- FlowEdge* const edgeBlockToAfter = fgGetPredForBlock(bNewCond->GetTrueTarget(), bNewCond);
+ FlowEdge* const edgeBlockToNext = bNewCond->GetFalseEdge();
+ FlowEdge* const edgeBlockToAfter = bNewCond->GetTrueEdge();
JITDUMP("Setting weight of " FMT_BB " -> " FMT_BB " to " FMT_WT " (enter loop)\n", bNewCond->bbNum,
bNewCond->GetFalseTarget()->bbNum, blockToNextWeight);
@@ -2976,7 +2985,7 @@ bool Compiler::optCreatePreheader(FlowGraphNaturalLoop* loop)
insertBefore = header;
}
- BasicBlock* preheader = fgNewBBbefore(BBJ_ALWAYS, insertBefore, false, header);
+ BasicBlock* preheader = fgNewBBbefore(BBJ_ALWAYS, insertBefore, false);
preheader->SetFlags(BBF_INTERNAL);
fgSetEHRegionForNewPreheaderOrExit(preheader);
@@ -2989,7 +2998,8 @@ bool Compiler::optCreatePreheader(FlowGraphNaturalLoop* loop)
JITDUMP("Created new preheader " FMT_BB " for " FMT_LP "\n", preheader->bbNum, loop->GetIndex());
- fgAddRefPred(header, preheader);
+ FlowEdge* const newEdge = fgAddRefPred(header, preheader);
+ preheader->SetTargetEdge(newEdge);
for (FlowEdge* enterEdge : loop->EntryEdges())
{
@@ -3092,26 +3102,27 @@ bool Compiler::optCanonicalizeExit(FlowGraphNaturalLoop* loop, BasicBlock* exit)
BasicBlock* bottom = loop->GetLexicallyBottomMostBlock();
if (bottom->hasTryIndex() && (bottom->getTryIndex() == finallyBlock->getHndIndex()) && !bottom->hasHndIndex())
{
- newExit = fgNewBBafter(BBJ_ALWAYS, bottom, true, exit);
+ newExit = fgNewBBafter(BBJ_ALWAYS, bottom, true);
}
else
{
// Otherwise just do the heavy-handed thing and insert it anywhere in the right region.
- newExit = fgNewBBinRegion(BBJ_ALWAYS, finallyBlock->bbHndIndex, 0, nullptr, exit, /* putInFilter */ false,
+ newExit = fgNewBBinRegion(BBJ_ALWAYS, finallyBlock->bbHndIndex, 0, nullptr, /* putInFilter */ false,
/* runRarely */ false, /* insertAtEnd */ true);
}
}
else
#endif
{
- newExit = fgNewBBbefore(BBJ_ALWAYS, exit, false, exit);
+ newExit = fgNewBBbefore(BBJ_ALWAYS, exit, false);
newExit->SetFlags(BBF_NONE_QUIRK);
fgSetEHRegionForNewPreheaderOrExit(newExit);
}
newExit->SetFlags(BBF_INTERNAL);
- fgAddRefPred(exit, newExit);
+ FlowEdge* const newEdge = fgAddRefPred(exit, newExit);
+ newExit->SetTargetEdge(newEdge);
newExit->bbCodeOffs = exit->bbCodeOffs;
@@ -3268,7 +3279,7 @@ void Compiler::optSetWeightForPreheaderOrExit(FlowGraphNaturalLoop* loop, BasicB
}
// Normalize block -> target weight
- FlowEdge* const edgeFromBlock = fgGetPredForBlock(block->GetTarget(), block);
+ FlowEdge* const edgeFromBlock = block->GetTargetEdge();
assert(edgeFromBlock != nullptr);
edgeFromBlock->setEdgeWeights(block->bbWeight, block->bbWeight, block->GetTarget());
}
@@ -5615,7 +5626,6 @@ void Compiler::optComputeLoopSideEffectsOfBlock(BasicBlock* blk, FlowGraphNatura
case GT_XCHG:
case GT_CMPXCHG:
case GT_MEMORYBARRIER:
- case GT_STORE_DYN_BLK:
{
memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
}
diff --git a/src/coreclr/jit/patchpoint.cpp b/src/coreclr/jit/patchpoint.cpp
index 27b94470962e..c98205a55ea4 100644
--- a/src/coreclr/jit/patchpoint.cpp
+++ b/src/coreclr/jit/patchpoint.cpp
@@ -101,13 +101,12 @@ private:
// Arguments:
// jumpKind - jump kind for the new basic block
// insertAfter - basic block, after which compiler has to insert the new one.
- // jumpDest - jump target for the new basic block. Defaults to nullptr.
//
// Return Value:
// new basic block.
- BasicBlock* CreateAndInsertBasicBlock(BBKinds jumpKind, BasicBlock* insertAfter, BasicBlock* jumpDest = nullptr)
+ BasicBlock* CreateAndInsertBasicBlock(BBKinds jumpKind, BasicBlock* insertAfter)
{
- BasicBlock* block = compiler->fgNewBBafter(jumpKind, insertAfter, true, jumpDest);
+ BasicBlock* block = compiler->fgNewBBafter(jumpKind, insertAfter, true);
block->SetFlags(BBF_IMPORTED);
return block;
}
@@ -143,21 +142,22 @@ private:
// Current block now becomes the test block
BasicBlock* remainderBlock = compiler->fgSplitBlockAtBeginning(block);
- BasicBlock* helperBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, block, block->Next());
+ BasicBlock* helperBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, block);
// Update flow and flags
- block->SetCond(remainderBlock, helperBlock);
block->SetFlags(BBF_INTERNAL);
-
helperBlock->SetFlags(BBF_BACKWARD_JUMP | BBF_NONE_QUIRK);
+ assert(block->TargetIs(remainderBlock));
FlowEdge* const falseEdge = compiler->fgAddRefPred(helperBlock, block);
- FlowEdge* const trueEdge = compiler->fgGetPredForBlock(remainderBlock, block);
+ FlowEdge* const trueEdge = block->GetTargetEdge();
trueEdge->setLikelihood(HIGH_PROBABILITY / 100.0);
falseEdge->setLikelihood((100 - HIGH_PROBABILITY) / 100.0);
+ block->SetCond(trueEdge, falseEdge);
FlowEdge* const newEdge = compiler->fgAddRefPred(remainderBlock, helperBlock);
newEdge->setLikelihood(1.0);
+ helperBlock->SetTargetEdge(newEdge);
// Update weights
remainderBlock->inheritWeight(block);
@@ -238,7 +238,7 @@ private:
}
// Update flow
- block->SetKindAndTarget(BBJ_THROW);
+ block->SetKindAndTargetEdge(BBJ_THROW);
// Add helper call
//
diff --git a/src/coreclr/jit/scev.cpp b/src/coreclr/jit/scev.cpp
new file mode 100644
index 000000000000..911cb1ff0502
--- /dev/null
+++ b/src/coreclr/jit/scev.cpp
@@ -0,0 +1,959 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+// This file contains code to analyze how the values of induction variables
+// evolve (scalar evolution analysis), and to turn them into the SCEV IR
+// defined in scev.h. The analysis is inspired by "Michael Wolfe. 1992. Beyond
+// induction variables." and also by LLVM's scalar evolution analysis.
+//
+// The main idea of scalar evolution analysis is to give a closed form
+// describing the value of tree nodes inside loops even when taking into
+// account that they are changing on each loop iteration. This is useful for
+// optimizations that want to reason about values of IR nodes inside loops,
+// such as IV widening or strength reduction.
+//
+// To represent the possibility of evolution the SCEV IR includes the concept
+// of an add recurrence <loop, start, step>, which describes a value that
+// starts at "start" and changes by adding "step" at each iteration. The IR
+// nodes that change in this way (or depend on something that changes in this
+// way) are generally called induction variables.
+//
+// An add recurrence arises only when a local exists in the loop that is
+// mutated in each iteration. Such a local will naturally end up with a phi
+// node in the loop header. These locals are called primary (or basic)
+// induction variables. The non-primary IVs (which always must depend on the
+// primary IVs) are sometimes called secondary IVs.
+//
+// The job of the analysis is to go from a tree node to a SCEV node that
+// describes its value (possibly taking its evolution into account). Note that
+// SCEV nodes are immutable and the values they represent are _not_
+// flow-dependent; that is, they don't exist at a specific location inside the
+// loop, even though some particular tree node gave rise to that SCEV node. The
+// analysis itself _is_ flow-dependent and guarantees that the Scev* returned
+// describes the value that corresponds to what the tree node computes at its
+// specific location. However, it would be perfectly legal for two trees at
+// different locations in the loop to analyze to the same SCEV node (even
+// potentially returning the same pointer). For example, in theory "i" and "j"
+// in the following loop would both be represented by the same add recurrence
+// <L, 0, 1>, and the analysis could even return the same Scev* for both of
+// them, even if it does not today:
+//
+// int i = 0;
+// while (true)
+// {
+// i++;
+// ...
+// int j = i - 1;
+// }
+//
+// Actually materializing the value of a SCEV node back into tree IR is not
+// implemented yet, but generally would depend on the availability of tree
+// nodes that compute the dependent values at the point where the IR is to be
+// materialized.
+//
+// Besides the add recurrences the analysis itself is generally a
+// straightforward translation from JIT IR into the SCEV IR. Creating the add
+// recurrences requires paying attention to the structure of PHIs, and
+// disambiguating the values coming from outside the loop and the values coming
+// from the backedges.
+//
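+// As a small example in the notation above: in a loop L of the form
+// "for (int i = 0; i < n; i++)", the value of "i" in the body is the add
+// recurrence <L, 0, 1>, and a derived IV such as "4 * i" is <L, 0, 4>:
+// scaling an add recurrence by a loop-invariant constant scales both its
+// start and its step.
+//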
+
+#include "jitpch.h"
+
+//------------------------------------------------------------------------
+// GetConstantValue: If this SSA use refers to a constant, then fetch that
+// constant.
+//
+// Parameters:
+// comp - Compiler instance
+// cns - [out] Constant value; only valid if this function returns true.
+//
+// Returns:
+// True if this SSA use refers to a constant; otherwise false.
+//
+bool ScevLocal::GetConstantValue(Compiler* comp, int64_t* cns)
+{
+ LclVarDsc* dsc = comp->lvaGetDesc(LclNum);
+ LclSsaVarDsc* ssaDsc = dsc->GetPerSsaData(SsaNum);
+ GenTreeLclVarCommon* defNode = ssaDsc->GetDefNode();
+ if ((defNode != nullptr) && defNode->Data()->OperIs(GT_CNS_INT, GT_CNS_LNG))
+ {
+ *cns = defNode->Data()->AsIntConCommon()->IntegralValue();
+ return true;
+ }
+
+ return false;
+}
+
+//------------------------------------------------------------------------
+// Scev::GetConstantValue: If this SCEV is always a constant (i.e. either an
+// inline constant or an SSA use referring to a constant) then obtain that
+// constant.
+//
+// Parameters:
+// comp - Compiler instance
+// cns - [out] Constant value; only valid if this function returns true.
+//
+// Returns:
+// True if a constant could be extracted.
+//
+bool Scev::GetConstantValue(Compiler* comp, int64_t* cns)
+{
+ if (OperIs(ScevOper::Constant))
+ {
+ *cns = ((ScevConstant*)this)->Value;
+ return true;
+ }
+
+ if (OperIs(ScevOper::Local))
+ {
+ return ((ScevLocal*)this)->GetConstantValue(comp, cns);
+ }
+
+ return false;
+}
+
+#ifdef DEBUG
+//------------------------------------------------------------------------
+// Dump: Print this scev node to stdout.
+//
+// Parameters:
+// comp - Compiler instance
+//
+void Scev::Dump(Compiler* comp)
+{
+ switch (Oper)
+ {
+ case ScevOper::Constant:
+ {
+ ScevConstant* cns = (ScevConstant*)this;
+ printf("%zd", (ssize_t)cns->Value);
+ break;
+ }
+ case ScevOper::Local:
+ {
+ ScevLocal* invariantLocal = (ScevLocal*)this;
+ printf("V%02u.%u", invariantLocal->LclNum, invariantLocal->SsaNum);
+
+ int64_t cns;
+ if (invariantLocal->GetConstantValue(comp, &cns))
+ {
+ printf(" (%lld)", (long long)cns);
+ }
+ break;
+ }
+ case ScevOper::ZeroExtend:
+ case ScevOper::SignExtend:
+ {
+ ScevUnop* unop = (ScevUnop*)this;
+ printf("%cext<%d>(", unop->Oper == ScevOper::ZeroExtend ? 'z' : 's', genTypeSize(unop->Type) * 8);
+ unop->Op1->Dump(comp);
+ printf(")");
+ break;
+ }
+ case ScevOper::Add:
+ case ScevOper::Mul:
+ case ScevOper::Lsh:
+ {
+ ScevBinop* binop = (ScevBinop*)this;
+ printf("(");
+ binop->Op1->Dump(comp);
+ const char* op;
+ switch (binop->Oper)
+ {
+ case ScevOper::Add:
+ op = "+";
+ break;
+ case ScevOper::Mul:
+ op = "*";
+ break;
+ case ScevOper::Lsh:
+ op = "<<";
+ break;
+ default:
+ unreached();
+ }
+ printf(" %s ", op);
+ binop->Op2->Dump(comp);
+ printf(")");
+ break;
+ }
+ case ScevOper::AddRec:
+ {
+ ScevAddRec* addRec = (ScevAddRec*)this;
+ printf("<" FMT_LP, addRec->Loop->GetIndex());
+ printf(", ");
+ addRec->Start->Dump(comp);
+ printf(", ");
+ addRec->Step->Dump(comp);
+ printf(">");
+ break;
+ }
+ default:
+ unreached();
+ }
+}
+#endif
+
+//------------------------------------------------------------------------
+// ScalarEvolutionContext: Construct an instance of a context to do scalar evolution in.
+//
+// Parameters:
+// comp - Compiler instance
+//
+// Remarks:
+// After construction the context should be reset for a new loop by calling
+// ResetForLoop.
+//
+ScalarEvolutionContext::ScalarEvolutionContext(Compiler* comp)
+ : m_comp(comp), m_cache(comp->getAllocator(CMK_LoopIVOpts)), m_ephemeralCache(comp->getAllocator(CMK_LoopIVOpts))
+{
+}
+
+//------------------------------------------------------------------------
+// ResetForLoop: Reset the internal cache in preparation for scalar
+// evolution analysis inside a new loop.
+//
+// Parameters:
+// loop - The loop.
+//
+void ScalarEvolutionContext::ResetForLoop(FlowGraphNaturalLoop* loop)
+{
+ m_loop = loop;
+ m_cache.RemoveAll();
+}
+
+//------------------------------------------------------------------------
+// NewConstant: Create a SCEV node that represents a constant.
+//
+// Parameters:
+// type - Type of the constant
+// value - The constant value
+//
+// Returns:
+// The new node.
+//
+ScevConstant* ScalarEvolutionContext::NewConstant(var_types type, int64_t value)
+{
+ ScevConstant* constant = new (m_comp, CMK_LoopIVOpts) ScevConstant(type, value);
+ return constant;
+}
+
+//------------------------------------------------------------------------
+// NewLocal: Create a SCEV node that represents an invariant local (i.e. a
+// use of an SSA def from outside the loop).
+//
+// Parameters:
+// lclNum - The local
+// ssaNum - The SSA number of the def outside the loop that is being used.
+//
+// Returns:
+// The new node.
+//
+ScevLocal* ScalarEvolutionContext::NewLocal(unsigned lclNum, unsigned ssaNum)
+{
+ var_types type = genActualType(m_comp->lvaGetDesc(lclNum));
+ ScevLocal* invariantLocal = new (m_comp, CMK_LoopIVOpts) ScevLocal(type, lclNum, ssaNum);
+ return invariantLocal;
+}
+
+//------------------------------------------------------------------------
+// NewExtension: Create a SCEV node that represents a zero or sign extension.
+//
+// Parameters:
+// oper - The operation (ScevOper::ZeroExtend or ScevOper::SignExtend)
+// targetType - The target type of the extension
+// op - The operand being extended.
+//
+// Returns:
+// The new node.
+//
+ScevUnop* ScalarEvolutionContext::NewExtension(ScevOper oper, var_types targetType, Scev* op)
+{
+ assert(op != nullptr);
+ ScevUnop* ext = new (m_comp, CMK_LoopIVOpts) ScevUnop(oper, targetType, op);
+ return ext;
+}
+
+//------------------------------------------------------------------------
+// NewBinop: Create a SCEV node that represents a binary operation.
+//
+// Parameters:
+// oper - The operation
+// op1 - First operand
+// op2 - Second operand
+//
+// Returns:
+// The new node.
+//
+ScevBinop* ScalarEvolutionContext::NewBinop(ScevOper oper, Scev* op1, Scev* op2)
+{
+ assert((op1 != nullptr) && (op2 != nullptr));
+ ScevBinop* binop = new (m_comp, CMK_LoopIVOpts) ScevBinop(oper, op1->Type, op1, op2);
+ return binop;
+}
+
+//------------------------------------------------------------------------
+// NewAddRec: Create a SCEV node that represents a new add recurrence.
+//
+// Parameters:
+// start - Value of the recurrence at the first iteration
+// step - Step value of the recurrence
+//
+// Returns:
+// The new node.
+//
+ScevAddRec* ScalarEvolutionContext::NewAddRec(Scev* start, Scev* step)
+{
+ assert((start != nullptr) && (step != nullptr));
+ ScevAddRec* addRec = new (m_comp, CMK_LoopIVOpts) ScevAddRec(start->Type, start, step DEBUGARG(m_loop));
+ return addRec;
+}
+
+//------------------------------------------------------------------------
+// CreateSimpleInvariantScev: Create a "simple invariant" SCEV node for a tree:
+// either an invariant local use or a constant.
+//
+// Parameters:
+// tree - The tree
+//
+// Returns:
+// SCEV node or nullptr if the tree is not a simple invariant.
+//
+Scev* ScalarEvolutionContext::CreateSimpleInvariantScev(GenTree* tree)
+{
+ if (tree->OperIs(GT_CNS_INT, GT_CNS_LNG))
+ {
+ return CreateScevForConstant(tree->AsIntConCommon());
+ }
+
+ if (tree->OperIs(GT_LCL_VAR) && tree->AsLclVarCommon()->HasSsaName())
+ {
+ LclVarDsc* dsc = m_comp->lvaGetDesc(tree->AsLclVarCommon());
+ LclSsaVarDsc* ssaDsc = dsc->GetPerSsaData(tree->AsLclVarCommon()->GetSsaNum());
+
+ if ((ssaDsc->GetBlock() == nullptr) || !m_loop->ContainsBlock(ssaDsc->GetBlock()))
+ {
+ return NewLocal(tree->AsLclVarCommon()->GetLclNum(), tree->AsLclVarCommon()->GetSsaNum());
+ }
+ }
+
+ return nullptr;
+}
+
+//------------------------------------------------------------------------
+// CreateScevForConstant: Given an integer constant, create a SCEV node for it.
+//
+// Parameters:
+// tree - The integer constant
+//
+// Returns:
+// SCEV node or nullptr if the integer constant is not representable (e.g. a handle).
+//
+Scev* ScalarEvolutionContext::CreateScevForConstant(GenTreeIntConCommon* tree)
+{
+ if (tree->IsIconHandle() || !tree->TypeIs(TYP_INT, TYP_LONG))
+ {
+ return nullptr;
+ }
+
+ return NewConstant(tree->TypeGet(), tree->AsIntConCommon()->IntegralValue());
+}
+
+//------------------------------------------------------------------------
+// AnalyzeNew: Analyze the specified tree in the specified block, without going
+// through the cache.
+//
+// Parameters:
+// block - Block containing the tree
+// tree - Tree node
+// depth - Current analysis depth
+//
+// Returns:
+// SCEV node if the tree was analyzable; otherwise nullptr if the value
+// cannot be described.
+//
+Scev* ScalarEvolutionContext::AnalyzeNew(BasicBlock* block, GenTree* tree, int depth)
+{
+ switch (tree->OperGet())
+ {
+ case GT_CNS_INT:
+ case GT_CNS_LNG:
+ {
+ return CreateScevForConstant(tree->AsIntConCommon());
+ }
+ case GT_LCL_VAR:
+ case GT_PHI_ARG:
+ {
+ if (!tree->AsLclVarCommon()->HasSsaName())
+ {
+ return nullptr;
+ }
+
+ assert(m_comp->lvaInSsa(tree->AsLclVarCommon()->GetLclNum()));
+ LclVarDsc* dsc = m_comp->lvaGetDesc(tree->AsLclVarCommon());
+ LclSsaVarDsc* ssaDsc = dsc->GetPerSsaData(tree->AsLclVarCommon()->GetSsaNum());
+
+ if ((ssaDsc->GetBlock() == nullptr) || !m_loop->ContainsBlock(ssaDsc->GetBlock()))
+ {
+ return NewLocal(tree->AsLclVarCommon()->GetLclNum(), tree->AsLclVarCommon()->GetSsaNum());
+ }
+
+ if (ssaDsc->GetDefNode() == nullptr)
+ {
+ // GT_CALL retbuf def?
+ return nullptr;
+ }
+
+ if (ssaDsc->GetDefNode()->GetLclNum() != tree->AsLclVarCommon()->GetLclNum())
+ {
+ // Should be a def of the parent
+ assert(dsc->lvIsStructField && (ssaDsc->GetDefNode()->GetLclNum() == dsc->lvParentLcl));
+ return nullptr;
+ }
+
+ return Analyze(ssaDsc->GetBlock(), ssaDsc->GetDefNode(), depth + 1);
+ }
+ case GT_STORE_LCL_VAR:
+ {
+ GenTreeLclVarCommon* store = tree->AsLclVarCommon();
+ GenTree* data = store->Data();
+ if (!data->OperIs(GT_PHI))
+ {
+ return Analyze(block, data, depth + 1);
+ }
+
+ if (block != m_loop->GetHeader())
+ {
+ return nullptr;
+ }
+
+ // We have a phi def for the current loop. Look for a primary
+ // induction variable.
+ GenTreePhi* phi = data->AsPhi();
+ GenTreePhiArg* enterSsa = nullptr;
+ GenTreePhiArg* backedgeSsa = nullptr;
+
+ for (GenTreePhi::Use& use : phi->Uses())
+ {
+ GenTreePhiArg* phiArg = use.GetNode()->AsPhiArg();
+ GenTreePhiArg*& ssaArg = m_loop->ContainsBlock(phiArg->gtPredBB) ? backedgeSsa : enterSsa;
+ if ((ssaArg == nullptr) || (ssaArg->GetSsaNum() == phiArg->GetSsaNum()))
+ {
+ ssaArg = phiArg;
+ }
+ else
+ {
+ return nullptr;
+ }
+ }
+
+ if ((enterSsa == nullptr) || (backedgeSsa == nullptr))
+ {
+ return nullptr;
+ }
+
+ ScevLocal* enterScev = NewLocal(enterSsa->GetLclNum(), enterSsa->GetSsaNum());
+
+ LclVarDsc* dsc = m_comp->lvaGetDesc(store);
+ LclSsaVarDsc* ssaDsc = dsc->GetPerSsaData(backedgeSsa->GetSsaNum());
+
+ if (ssaDsc->GetDefNode() == nullptr)
+ {
+ // GT_CALL retbuf def
+ return nullptr;
+ }
+
+ if (ssaDsc->GetDefNode()->GetLclNum() != store->GetLclNum())
+ {
+ assert(dsc->lvIsStructField && ssaDsc->GetDefNode()->GetLclNum() == dsc->lvParentLcl);
+ return nullptr;
+ }
+
+ assert(ssaDsc->GetBlock() != nullptr);
+
+ Scev* simpleAddRec = CreateSimpleAddRec(store, enterScev, ssaDsc->GetBlock(), ssaDsc->GetDefNode()->Data());
+ if (simpleAddRec != nullptr)
+ {
+ return simpleAddRec;
+ }
+
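+ // No simple "i = i + k" shape. Fall back to analyzing the value coming
+ // around the backedge with a symbolic placeholder standing in for the
+ // phi; MakeAddRecFromRecursiveScev then tries to turn "placeholder +
+ // step" back into a non-recursive add recurrence.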
+ ScevConstant* symbolicAddRec = NewConstant(data->TypeGet(), 0xdeadbeef);
+ m_ephemeralCache.Emplace(store, symbolicAddRec);
+
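+ // Anything analyzed while the placeholder is visible may depend on it,
+ // so such results go in the ephemeral cache and are discarded once the
+ // outermost recursive analysis finishes.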
+ Scev* result;
+ if (m_usingEphemeralCache)
+ {
+ result = Analyze(ssaDsc->GetBlock(), ssaDsc->GetDefNode()->Data(), depth + 1);
+ }
+ else
+ {
+ m_usingEphemeralCache = true;
+ result = Analyze(ssaDsc->GetBlock(), ssaDsc->GetDefNode()->Data(), depth + 1);
+ m_usingEphemeralCache = false;
+ m_ephemeralCache.RemoveAll();
+ }
+
+ if (result == nullptr)
+ {
+ return nullptr;
+ }
+
+ return MakeAddRecFromRecursiveScev(enterScev, result, symbolicAddRec);
+ }
+ case GT_CAST:
+ {
+ GenTreeCast* cast = tree->AsCast();
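+ // Only casts to TYP_LONG are modeled here, as zero/sign extensions.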
+ if (cast->gtCastType != TYP_LONG)
+ {
+ return nullptr;
+ }
+
+ Scev* op = Analyze(block, cast->CastOp(), depth + 1);
+ if (op == nullptr)
+ {
+ return nullptr;
+ }
+
+ return NewExtension(cast->IsUnsigned() ? ScevOper::ZeroExtend : ScevOper::SignExtend, TYP_LONG, op);
+ }
+ case GT_ADD:
+ case GT_MUL:
+ case GT_LSH:
+ {
+ Scev* op1 = Analyze(block, tree->gtGetOp1(), depth + 1);
+ if (op1 == nullptr)
+ return nullptr;
+
+ Scev* op2 = Analyze(block, tree->gtGetOp2(), depth + 1);
+ if (op2 == nullptr)
+ return nullptr;
+
+ ScevOper oper;
+ switch (tree->OperGet())
+ {
+ case GT_ADD:
+ oper = ScevOper::Add;
+ break;
+ case GT_MUL:
+ oper = ScevOper::Mul;
+ break;
+ case GT_LSH:
+ oper = ScevOper::Lsh;
+ break;
+ default:
+ unreached();
+ }
+
+ return NewBinop(oper, op1, op2);
+ }
+ case GT_COMMA:
+ {
+ return Analyze(block, tree->gtGetOp2(), depth + 1);
+ }
+ case GT_ARR_ADDR:
+ {
+ return Analyze(block, tree->AsArrAddr()->Addr(), depth + 1);
+ }
+ default:
+ return nullptr;
+ }
+}
+
+//------------------------------------------------------------------------
+// CreateSimpleAddRec: Create a "simple" add-recurrence. This handles the most
+// common patterns for primary induction variables where we see a store like
+// "i = i + 1".
+//
+// Parameters:
+// headerStore - Phi definition of the candidate primary induction variable
+// enterScev - SCEV describing start value of the primary induction variable
+// stepDefBlock - Block containing the def of the step value
+// stepDefData - Value of the def of the step value
+//
+// Returns:
+// SCEV node if this is a simple addrec shape. Otherwise nullptr.
+//
+Scev* ScalarEvolutionContext::CreateSimpleAddRec(GenTreeLclVarCommon* headerStore,
+ ScevLocal* enterScev,
+ BasicBlock* stepDefBlock,
+ GenTree* stepDefData)
+{
+ if (!stepDefData->OperIs(GT_ADD))
+ {
+ return nullptr;
+ }
+
+ GenTree* stepTree;
+ GenTree* op1 = stepDefData->gtGetOp1();
+ GenTree* op2 = stepDefData->gtGetOp2();
+ if (op1->OperIs(GT_LCL_VAR) && (op1->AsLclVar()->GetLclNum() == headerStore->GetLclNum()) &&
+ (op1->AsLclVar()->GetSsaNum() == headerStore->GetSsaNum()))
+ {
+ stepTree = op2;
+ }
+ else if (op2->OperIs(GT_LCL_VAR) && (op2->AsLclVar()->GetLclNum() == headerStore->GetLclNum()) &&
+ (op2->AsLclVar()->GetSsaNum() == headerStore->GetSsaNum()))
+ {
+ stepTree = op1;
+ }
+ else
+ {
+ // Not a simple IV shape (i.e. more complex than "i = i + k")
+ return nullptr;
+ }
+
+ Scev* stepScev = CreateSimpleInvariantScev(stepTree);
+ if (stepScev == nullptr)
+ {
+ return nullptr;
+ }
+
+ return NewAddRec(enterScev, stepScev);
+}
+
+//------------------------------------------------------------------------
+// ExtractAddOperands: Extract all operands of potentially nested add
+// operations.
+//
+// Parameters:
+// binop - The binop representing an add
+// operands - Array stack to add the operands to
+//
+void ScalarEvolutionContext::ExtractAddOperands(ScevBinop* binop, ArrayStack<Scev*>& operands)
+{
+ assert(binop->OperIs(ScevOper::Add));
+
+ if (binop->Op1->OperIs(ScevOper::Add))
+ {
+ ExtractAddOperands(static_cast<ScevBinop*>(binop->Op1), operands);
+ }
+ else
+ {
+ operands.Push(binop->Op1);
+ }
+
+ if (binop->Op2->OperIs(ScevOper::Add))
+ {
+ ExtractAddOperands(static_cast<ScevBinop*>(binop->Op2), operands);
+ }
+ else
+ {
+ operands.Push(binop->Op2);
+ }
+}
+
+//------------------------------------------------------------------------
+// MakeAddRecFromRecursiveScev: Given a recursive SCEV and a symbolic SCEV
+// node whose appearances stand for occurrences of the full SCEV, create a
+// non-recursive add-rec from it.
+//
+// Parameters:
+// startScev - The start value of the addrec
+// scev - The SCEV computed for the value coming in over the backedge
+// recursiveScev - A symbolic node whose appearance represents the value of "scev"
+//
+// Returns:
+// A non-recursive addrec
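+//
+// Remarks:
+// As a hypothetical example, if the value coming around the backedge
+// analyzed to "recursiveScev + 3" with start "V01", the result is the
+// add recurrence <L, V01, 3>.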
+//
+Scev* ScalarEvolutionContext::MakeAddRecFromRecursiveScev(Scev* startScev, Scev* scev, Scev* recursiveScev)
+{
+ if (!scev->OperIs(ScevOper::Add))
+ {
+ return nullptr;
+ }
+
+ ArrayStack<Scev*> addOperands(m_comp->getAllocator(CMK_LoopIVOpts));
+ ExtractAddOperands(static_cast<ScevBinop*>(scev), addOperands);
+
+ assert(addOperands.Height() >= 2);
+
+ int numAppearances = 0;
+ for (int i = 0; i < addOperands.Height(); i++)
+ {
+ Scev* addOperand = addOperands.Bottom(i);
+ if (addOperand == recursiveScev)
+ {
+ numAppearances++;
+ }
+ else
+ {
+ ScevVisit result = addOperand->Visit([=](Scev* node) {
+ if (node == recursiveScev)
+ {
+ return ScevVisit::Abort;
+ }
+
+ return ScevVisit::Continue;
+ });
+
+ if (result == ScevVisit::Abort)
+ {
+ // We do not handle nested occurrences. Some of these may be representable, some won't.
+ return nullptr;
+ }
+ }
+ }
+
+ if (numAppearances == 0)
+ {
+ // TODO-CQ: We currently cannot handle cases like
+ // i = arr.Length;
+ // j = i - 1;
+ // i = j;
+ // while (true) { ...; j = i - 1; i = j; }
+ //
+ // These cases can arise from loop structures like "for (int i =
+ // arr.Length; --i >= 0;)" when Roslyn emits a "sub; dup; stloc"
+ // sequence, and local prop + loop inversion converts the duplicated
+ // local into a fully fledged IV.
+ // In this case we see that i = <L, [i from outside loop], -1>, but for
+ // j we will see <L, [i from outside loop], -1> + (-1) in this function
+ // as the value coming around the backedge, and we cannot reconcile
+ // this.
+ //
+ return nullptr;
+ }
+
+ if (numAppearances > 1)
+ {
+ // Multiple occurrences -- cannot be represented as an addrec
+ // (corresponds to a geometric progression).
+ return nullptr;
+ }
+
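+ // The step is the sum of all add operands other than the single
+ // occurrence of the recursive node: e.g. "rec + 1 + x" gives the step
+ // "1 + x".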
+ Scev* step = nullptr;
+ for (int i = 0; i < addOperands.Height(); i++)
+ {
+ Scev* addOperand = addOperands.Bottom(i);
+ if (addOperand == recursiveScev)
+ {
+ continue;
+ }
+
+ if (step == nullptr)
+ {
+ step = addOperand;
+ }
+ else
+ {
+ step = NewBinop(ScevOper::Add, step, addOperand);
+ }
+ }
+
+ return NewAddRec(startScev, step);
+}
+
+//------------------------------------------------------------------------
+// Analyze: Analyze the specified tree in the specified block.
+//
+// Parameters:
+// block - Block containing the tree
+// tree - Tree node
+//
+// Returns:
+// SCEV node if the tree was analyzable; otherwise nullptr if the value
+// cannot be described.
+//
+Scev* ScalarEvolutionContext::Analyze(BasicBlock* block, GenTree* tree)
+{
+ return Analyze(block, tree, 0);
+}
+
+// Since the analysis follows SSA defs, we have no upper bound on the potential
+// depth of the analysis performed. We put an artificial limit on this for two
+// reasons:
+// 1. The analysis is recursive, and we should not stack overflow regardless of
+// the input program.
+// 2. If we produced arbitrarily deep SCEV trees then all algorithms over their
+// structure would similarly be at risk of stack overflows if they were
+// recursive. However, these algorithms are generally much more elegant when
+// they make use of recursion.
+const int SCALAR_EVOLUTION_ANALYSIS_MAX_DEPTH = 64;
+
+//------------------------------------------------------------------------
+// Analyze: Analyze the specified tree in the specified block.
+//
+// Parameters:
+// block - Block containing the tree
+// tree - Tree node
+// depth - Current analysis depth
+//
+// Returns:
+// SCEV node if the tree was analyzable; otherwise nullptr if the value
+// cannot be described.
+//
+Scev* ScalarEvolutionContext::Analyze(BasicBlock* block, GenTree* tree, int depth)
+{
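+ // Prefer the persistent cache; fall back to the ephemeral cache while a
+ // symbolic recurrence is being analyzed.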
+ Scev* result;
+ if (!m_cache.Lookup(tree, &result) && (!m_usingEphemeralCache || !m_ephemeralCache.Lookup(tree, &result)))
+ {
+ if (depth >= SCALAR_EVOLUTION_ANALYSIS_MAX_DEPTH)
+ {
+ return nullptr;
+ }
+
+ result = AnalyzeNew(block, tree, depth);
+
+ if (m_usingEphemeralCache)
+ {
+ m_ephemeralCache.Set(tree, result, ScalarEvolutionMap::Overwrite);
+ }
+ else
+ {
+ m_cache.Set(tree, result);
+ }
+ }
+
+ return result;
+}
+
+//------------------------------------------------------------------------
+// FoldBinop: Fold simple binops.
+//
+// Type parameters:
+// T - Type that the binop is being evaluated in
+//
+// Parameters:
+// oper - Binary operation
+// op1 - First operand
+// op2 - Second operand
+//
+// Returns:
+// Folded value.
+//
+template <typename T>
+static T FoldBinop(ScevOper oper, T op1, T op2)
+{
+ switch (oper)
+ {
+ case ScevOper::Add:
+ return op1 + op2;
+ case ScevOper::Mul:
+ return op1 * op2;
+ case ScevOper::Lsh:
+ return op1 << op2;
+ default:
+ unreached();
+ }
+}
+
+//------------------------------------------------------------------------
+// Simplify: Try to simplify a SCEV node by folding and canonicalization.
+//
+// Parameters:
+// scev - The node
+//
+// Returns:
+// Simplified node.
+//
+// Remarks:
+// Canonicalization is done for binops; constants are moved to the right and
+// addrecs are moved to the left.
+//
+// Simple unops/binops on constants are folded. Operands are distributed into
+// add recs whenever possible.
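+//
+// For example, <L, 0, 1> * 4 simplifies to <L, 0, 4>, and "2 + 3" folds
+// to "5".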
+//
+Scev* ScalarEvolutionContext::Simplify(Scev* scev)
+{
+ switch (scev->Oper)
+ {
+ case ScevOper::Constant:
+ case ScevOper::Local:
+ {
+ return scev;
+ }
+ case ScevOper::ZeroExtend:
+ case ScevOper::SignExtend:
+ {
+ ScevUnop* unop = (ScevUnop*)scev;
+ assert(genTypeSize(unop->Type) >= genTypeSize(unop->Op1->Type));
+
+ Scev* op1 = Simplify(unop->Op1);
+
+ if (unop->Type == op1->Type)
+ {
+ return op1;
+ }
+
+ assert((unop->Type == TYP_LONG) && (op1->Type == TYP_INT));
+
+ if (op1->OperIs(ScevOper::Constant))
+ {
+ ScevConstant* cns = (ScevConstant*)op1;
+ return NewConstant(unop->Type, unop->OperIs(ScevOper::ZeroExtend) ? (uint64_t)(int32_t)cns->Value
+ : (int64_t)(int32_t)cns->Value);
+ }
+
+ if (op1->OperIs(ScevOper::AddRec))
+ {
+ // TODO-Cleanup: This requires some proof that it is ok, but
+ // currently we do not rely on this.
+ return op1;
+ }
+
+ return (op1 == unop->Op1) ? unop : NewExtension(unop->Oper, unop->Type, op1);
+ }
+ case ScevOper::Add:
+ case ScevOper::Mul:
+ case ScevOper::Lsh:
+ {
+ ScevBinop* binop = (ScevBinop*)scev;
+ Scev* op1 = Simplify(binop->Op1);
+ Scev* op2 = Simplify(binop->Op2);
+
+ if (binop->OperIs(ScevOper::Add, ScevOper::Mul))
+ {
+ // Normalize addrecs to the left
+ if (op2->OperIs(ScevOper::AddRec) && !op1->OperIs(ScevOper::AddRec))
+ {
+ std::swap(op1, op2);
+ }
+ // Normalize constants to the right
+ if (op1->OperIs(ScevOper::Constant) && !op2->OperIs(ScevOper::Constant))
+ {
+ std::swap(op1, op2);
+ }
+ }
+
+ if (op1->OperIs(ScevOper::AddRec))
+ {
+ // <L, start, step> + x => <L, start + x, step>
+ // <L, start, step> * x => <L, start * x, step * x>
+ ScevAddRec* addRec = (ScevAddRec*)op1;
+ Scev* newStart = Simplify(NewBinop(binop->Oper, addRec->Start, op2));
+ Scev* newStep = scev->OperIs(ScevOper::Mul, ScevOper::Lsh)
+ ? Simplify(NewBinop(binop->Oper, addRec->Step, op2))
+ : addRec->Step;
+ return NewAddRec(newStart, newStep);
+ }
+
+ if (op1->OperIs(ScevOper::Constant) && op2->OperIs(ScevOper::Constant))
+ {
+ ScevConstant* cns1 = (ScevConstant*)op1;
+ ScevConstant* cns2 = (ScevConstant*)op2;
+ int64_t newValue;
+ if (binop->TypeIs(TYP_INT))
+ {
+ newValue = FoldBinop<int32_t>(binop->Oper, static_cast<int32_t>(cns1->Value),
+ static_cast<int32_t>(cns2->Value));
+ }
+ else
+ {
+ assert(binop->TypeIs(TYP_LONG));
+ newValue = FoldBinop<int64_t>(binop->Oper, cns1->Value, cns2->Value);
+ }
+
+ return NewConstant(binop->Type, newValue);
+ }
+
+ return (op1 == binop->Op1) && (op2 == binop->Op2) ? binop : NewBinop(binop->Oper, op1, op2);
+ }
+ case ScevOper::AddRec:
+ {
+ ScevAddRec* addRec = (ScevAddRec*)scev;
+ Scev* start = Simplify(addRec->Start);
+ Scev* step = Simplify(addRec->Step);
+ return (start == addRec->Start) && (step == addRec->Step) ? addRec : NewAddRec(start, step);
+ }
+ default:
+ unreached();
+ }
+}
diff --git a/src/coreclr/jit/scev.h b/src/coreclr/jit/scev.h
new file mode 100644
index 000000000000..0800be905503
--- /dev/null
+++ b/src/coreclr/jit/scev.h
@@ -0,0 +1,222 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#pragma once
+
+// This file contains the definition of the scalar evolution IR. This IR allows
+// representing the values of IR nodes inside loops in a closed form, taking
+// into account that they change on each loop iteration. The IR is based
+// around the following possible operations. At the core is ScevOper::AddRec,
+// which represents a value that evolves by an add recurrence. In dumps it is
+// described by <loop, start, step> where "loop" is the loop the value is
+// evolving in, "start" is the initial value and "step" is the step by which
+// the value evolves in every iteration.
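+//
+// For example, the primary IV "i" in "for (int i = 0; i < n; i++)" would be
+// described by the add recurrence <L, 0, 1> for its loop L.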
+//
+// See scev.cpp for further documentation.
+//
+enum class ScevOper
+{
+ Constant,
+ Local,
+ ZeroExtend,
+ SignExtend,
+ Add,
+ Mul,
+ Lsh,
+ AddRec,
+};
+
+static bool ScevOperIs(ScevOper oper, ScevOper otherOper)
+{
+ return oper == otherOper;
+}
+
+template <typename... Args>
+static bool ScevOperIs(ScevOper oper, ScevOper operFirst, Args... operTail)
+{
+ return oper == operFirst || ScevOperIs(oper, operTail...);
+}
+
+enum class ScevVisit
+{
+ Abort,
+ Continue,
+};
+
+struct Scev
+{
+ const ScevOper Oper;
+ const var_types Type;
+
+ Scev(ScevOper oper, var_types type) : Oper(oper), Type(type)
+ {
+ }
+
+ template <typename... Args>
+ bool OperIs(Args... opers)
+ {
+ return ScevOperIs(Oper, opers...);
+ }
+
+ bool TypeIs(var_types type)
+ {
+ return Type == type;
+ }
+
+ bool GetConstantValue(Compiler* comp, int64_t* cns);
+
+#ifdef DEBUG
+ void Dump(Compiler* comp);
+#endif
+ template <typename TVisitor>
+ ScevVisit Visit(TVisitor visitor);
+};
+
+struct ScevConstant : Scev
+{
+ ScevConstant(var_types type, int64_t value) : Scev(ScevOper::Constant, type), Value(value)
+ {
+ }
+
+ int64_t Value;
+};
+
+struct ScevLocal : Scev
+{
+ ScevLocal(var_types type, unsigned lclNum, unsigned ssaNum)
+ : Scev(ScevOper::Local, type), LclNum(lclNum), SsaNum(ssaNum)
+ {
+ }
+
+ const unsigned LclNum;
+ const unsigned SsaNum;
+
+ bool GetConstantValue(Compiler* comp, int64_t* cns);
+};
+
+struct ScevUnop : Scev
+{
+ ScevUnop(ScevOper oper, var_types type, Scev* op1) : Scev(oper, type), Op1(op1)
+ {
+ }
+
+ Scev* const Op1;
+};
+
+struct ScevBinop : ScevUnop
+{
+ ScevBinop(ScevOper oper, var_types type, Scev* op1, Scev* op2) : ScevUnop(oper, type, op1), Op2(op2)
+ {
+ }
+
+ Scev* const Op2;
+};
+
+// Represents a value that evolves by an add recurrence.
+// The value at iteration N is Start + N * Step.
+// "Start" and "Step" are guaranteed to be invariant in "Loop".
+struct ScevAddRec : Scev
+{
+ ScevAddRec(var_types type, Scev* start, Scev* step DEBUGARG(FlowGraphNaturalLoop* loop))
+ : Scev(ScevOper::AddRec, type), Start(start), Step(step) DEBUGARG(Loop(loop))
+ {
+ }
+
+ Scev* const Start;
+ Scev* const Step;
+ INDEBUG(FlowGraphNaturalLoop* const Loop);
+};
+
+//------------------------------------------------------------------------
+// Scev::Visit: Recursively visit all SCEV nodes in the SCEV tree.
+//
+// Parameters:
+// visitor - Callback with signature Scev* -> ScevVisit.
+//
+// Returns:
+// ScevVisit::Abort if "visitor" aborted, otherwise ScevVisit::Continue.
+//
+// Remarks:
+// The visit is done in preorder.
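+//
+// For example, to check whether a SCEV tree contains any local node:
+//
+// bool hasLocal = scev->Visit([](Scev* node) {
+// return node->OperIs(ScevOper::Local) ? ScevVisit::Abort : ScevVisit::Continue;
+// }) == ScevVisit::Abort;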
+//
+template <typename TVisitor>
+ScevVisit Scev::Visit(TVisitor visitor)
+{
+ if (visitor(this) == ScevVisit::Abort)
+ return ScevVisit::Abort;
+
+ switch (Oper)
+ {
+ case ScevOper::Constant:
+ case ScevOper::Local:
+ break;
+ case ScevOper::ZeroExtend:
+ case ScevOper::SignExtend:
+ return static_cast<ScevUnop*>(this)->Op1->Visit(visitor);
+ case ScevOper::Add:
+ case ScevOper::Mul:
+ case ScevOper::Lsh:
+ {
+ ScevBinop* binop = static_cast<ScevBinop*>(this);
+ if (binop->Op1->Visit(visitor) == ScevVisit::Abort)
+ return ScevVisit::Abort;
+
+ return binop->Op2->Visit(visitor);
+ }
+ case ScevOper::AddRec:
+ {
+ ScevAddRec* addrec = static_cast<ScevAddRec*>(this);
+ if (addrec->Start->Visit(visitor) == ScevVisit::Abort)
+ return ScevVisit::Abort;
+
+ return addrec->Step->Visit(visitor);
+ }
+ default:
+ unreached();
+ }
+
+ return ScevVisit::Continue;
+}
+
+typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, Scev*> ScalarEvolutionMap;
+
+// Scalar evolutions are analyzed in the context of a single loop and are
+// computed on demand by the "Analyze" method on this class, which
+// also maintains a cache.
+class ScalarEvolutionContext
+{
+ Compiler* m_comp;
+ FlowGraphNaturalLoop* m_loop = nullptr;
+ ScalarEvolutionMap m_cache;
+
+ // During analysis of PHIs we insert a symbolic node representing the
+ // "recurrence"; we use this cache to be able to invalidate things that end
+ // up depending on the symbolic node quickly.
+ ScalarEvolutionMap m_ephemeralCache;
+ bool m_usingEphemeralCache = false;
+
+ Scev* Analyze(BasicBlock* block, GenTree* tree, int depth);
+ Scev* AnalyzeNew(BasicBlock* block, GenTree* tree, int depth);
+ Scev* CreateSimpleAddRec(GenTreeLclVarCommon* headerStore,
+ ScevLocal* start,
+ BasicBlock* stepDefBlock,
+ GenTree* stepDefData);
+ Scev* MakeAddRecFromRecursiveScev(Scev* start, Scev* scev, Scev* recursiveScev);
+ Scev* CreateSimpleInvariantScev(GenTree* tree);
+ Scev* CreateScevForConstant(GenTreeIntConCommon* tree);
+ void ExtractAddOperands(ScevBinop* add, ArrayStack<Scev*>& operands);
+
+public:
+ ScalarEvolutionContext(Compiler* comp);
+
+ void ResetForLoop(FlowGraphNaturalLoop* loop);
+
+ ScevConstant* NewConstant(var_types type, int64_t value);
+ ScevLocal* NewLocal(unsigned lclNum, unsigned ssaNum);
+ ScevUnop* NewExtension(ScevOper oper, var_types targetType, Scev* op);
+ ScevBinop* NewBinop(ScevOper oper, Scev* op1, Scev* op2);
+ ScevAddRec* NewAddRec(Scev* start, Scev* step);
+
+ Scev* Analyze(BasicBlock* block, GenTree* tree);
+ Scev* Simplify(Scev* scev);
+};
diff --git a/src/coreclr/jit/sideeffects.cpp b/src/coreclr/jit/sideeffects.cpp
index d2c1de6c749a..a2dd47c994ef 100644
--- a/src/coreclr/jit/sideeffects.cpp
+++ b/src/coreclr/jit/sideeffects.cpp
@@ -174,7 +174,7 @@ AliasSet::NodeInfo::NodeInfo(Compiler* compiler, GenTree* node)
// Is the operation a write? If so, set `node` to the location that is being written to.
bool isWrite = false;
- if (node->OperIsStore() || node->OperIs(GT_STORE_DYN_BLK, GT_MEMORYBARRIER))
+ if (node->OperIsStore() || node->OperIs(GT_MEMORYBARRIER))
{
isWrite = true;
}
diff --git a/src/coreclr/jit/simd.cpp b/src/coreclr/jit/simd.cpp
index d69730ad520e..48c23eb64641 100644
--- a/src/coreclr/jit/simd.cpp
+++ b/src/coreclr/jit/simd.cpp
@@ -549,8 +549,6 @@ bool areFieldAddressesTheSame(GenTreeFieldAddr* op1, GenTreeFieldAddr* op2)
bool Compiler::areFieldsContiguous(GenTreeIndir* op1, GenTreeIndir* op2)
{
assert(op1->isIndir() && op2->isIndir());
- // TODO-1stClassStructs: delete once IND<struct> nodes are no more.
- assert(!op1->TypeIs(TYP_STRUCT) && !op2->TypeIs(TYP_STRUCT));
var_types op1Type = op1->TypeGet();
var_types op2Type = op2->TypeGet();
diff --git a/src/coreclr/jit/switchrecognition.cpp b/src/coreclr/jit/switchrecognition.cpp
index 1008b81194f8..3b9c58b38518 100644
--- a/src/coreclr/jit/switchrecognition.cpp
+++ b/src/coreclr/jit/switchrecognition.cpp
@@ -319,6 +319,10 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t*
const bool isTest = IsConstantTestCondBlock(lastBlock, &blockIfTrue, &blockIfFalse, &isReversed);
assert(isTest);
+ assert(firstBlock->TrueTargetIs(blockIfTrue));
+ FlowEdge* const trueEdge = firstBlock->GetTrueEdge();
+ FlowEdge* const falseEdge = firstBlock->GetFalseEdge();
+
// Convert firstBlock to a switch block
firstBlock->SetSwitch(new (this, CMK_BasicBlock) BBswtDesc);
firstBlock->bbCodeOffsEnd = lastBlock->bbCodeOffsEnd;
@@ -338,8 +342,9 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t*
gtUpdateStmtSideEffects(firstBlock->lastStmt());
// Unlink and remove the whole chain of conditional blocks
- BasicBlock* blockToRemove = firstBlock->Next();
- fgRemoveRefPred(blockToRemove, firstBlock);
+ fgRemoveRefPred(falseEdge);
+ BasicBlock* blockToRemove = falseEdge->getDestinationBlock();
+ assert(firstBlock->NextIs(blockToRemove));
while (!lastBlock->NextIs(blockToRemove))
{
blockToRemove = fgRemoveBlock(blockToRemove, true);
@@ -361,18 +366,20 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t*
if (isReversed)
{
assert(lastBlock->FalseTargetIs(blockIfTrue));
- fgRemoveRefPred(blockIfTrue, firstBlock);
+ fgRemoveRefPred(trueEdge);
BasicBlock* targetBlock = blockIfTrue;
- blockIfTrue = fgNewBBafter(BBJ_ALWAYS, firstBlock, true, targetBlock);
+ blockIfTrue = fgNewBBafter(BBJ_ALWAYS, firstBlock, true);
FlowEdge* const newEdge = fgAddRefPred(targetBlock, blockIfTrue);
skipPredRemoval = true;
+ blockIfTrue->SetTargetEdge(newEdge);
}
else
{
assert(lastBlock->FalseTargetIs(blockIfFalse));
BasicBlock* targetBlock = blockIfFalse;
- blockIfFalse = fgNewBBafter(BBJ_ALWAYS, firstBlock, true, targetBlock);
+ blockIfFalse = fgNewBBafter(BBJ_ALWAYS, firstBlock, true);
FlowEdge* const newEdge = fgAddRefPred(targetBlock, blockIfFalse);
+ blockIfFalse->SetTargetEdge(newEdge);
}
}
@@ -397,7 +404,7 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t*
// Unlink blockIfTrue from firstBlock, we're going to link it again in the loop below.
if (!skipPredRemoval)
{
- fgRemoveRefPred(blockIfTrue, firstBlock);
+ fgRemoveRefPred(trueEdge);
}
for (unsigned i = 0; i < jumpCount; i++)
diff --git a/src/coreclr/jit/targetamd64.h b/src/coreclr/jit/targetamd64.h
index 4abe71984b57..09d6729f9dc6 100644
--- a/src/coreclr/jit/targetamd64.h
+++ b/src/coreclr/jit/targetamd64.h
@@ -563,4 +563,9 @@
#define RBM_STACK_PROBE_HELPER_TRASH RBM_RAX
#endif // !UNIX_AMD64_ABI
+ #define SWIFT_SUPPORT
+ #define REG_SWIFT_ERROR REG_R12
+ #define RBM_SWIFT_ERROR RBM_R12
+ #define REG_SWIFT_SELF REG_R13
+
// clang-format on
diff --git a/src/coreclr/jit/targetarm64.h b/src/coreclr/jit/targetarm64.h
index 3646ecb4407b..ce038021bc52 100644
--- a/src/coreclr/jit/targetarm64.h
+++ b/src/coreclr/jit/targetarm64.h
@@ -370,4 +370,9 @@
#define REG_ZERO_INIT_FRAME_REG2 REG_R10
#define REG_ZERO_INIT_FRAME_SIMD REG_V16
+ #define SWIFT_SUPPORT
+ #define REG_SWIFT_ERROR REG_R21
+ #define RBM_SWIFT_ERROR RBM_R21
+ #define REG_SWIFT_SELF REG_R20
+
// clang-format on
diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp
index cf4954762ceb..79cc55483554 100644
--- a/src/coreclr/jit/valuenum.cpp
+++ b/src/coreclr/jit/valuenum.cpp
@@ -9573,8 +9573,8 @@ const uint8_t ValueNumStore::s_vnfOpAttribs[VNF_COUNT] = {
static genTreeOps genTreeOpsIllegalAsVNFunc[] = {GT_IND, // When we do heap memory.
GT_NULLCHECK, GT_QMARK, GT_COLON, GT_LOCKADD, GT_XADD, GT_XCHG,
- GT_CMPXCHG, GT_LCLHEAP, GT_BOX, GT_XORR, GT_XAND, GT_STORE_DYN_BLK,
- GT_STORE_LCL_VAR, GT_STORE_LCL_FLD, GT_STOREIND, GT_STORE_BLK,
+ GT_CMPXCHG, GT_LCLHEAP, GT_BOX, GT_XORR, GT_XAND, GT_STORE_LCL_VAR,
+ GT_STORE_LCL_FLD, GT_STOREIND, GT_STORE_BLK,
// These need special semantics:
GT_COMMA, // == second argument (but with exception(s) from first).
GT_ARR_ADDR, GT_BOUNDS_CHECK,
@@ -9874,7 +9874,7 @@ public:
return false;
}
- if (!predBlock->KindIs(BBJ_COND) || predBlock->TrueTargetIs(predBlock->GetFalseTarget()))
+ if (!predBlock->KindIs(BBJ_COND) || predBlock->TrueEdgeIs(predBlock->GetFalseEdge()))
{
return true;
}
@@ -11324,7 +11324,9 @@ void Compiler::fgValueNumberTree(GenTree* tree)
break;
case GT_CATCH_ARG:
+ case GT_SWIFT_ERROR:
// We know nothing about the value of a caught expression.
+ // We also know nothing about the error register's value post-Swift call.
tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet()));
break;
@@ -11486,12 +11488,7 @@ void Compiler::fgValueNumberTree(GenTree* tree)
unsigned loadSize = tree->AsIndir()->Size();
VNFuncApp funcApp{VNF_COUNT};
- // TODO-1stClassStructs: delete layout-less "IND(struct)" nodes and the "loadSize == 0" condition.
- if (loadSize == 0)
- {
- tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, loadType));
- }
- else if (fgValueNumberConstLoad(tree->AsIndir()))
+ if (fgValueNumberConstLoad(tree->AsIndir()))
{
// VN is assigned inside fgValueNumberConstLoad
}
@@ -11758,30 +11755,6 @@ void Compiler::fgValueNumberTree(GenTree* tree)
break;
#endif // FEATURE_HW_INTRINSICS
- case GT_STORE_DYN_BLK:
- {
- // Conservatively, mutate the heaps - we don't analyze these rare stores.
- // Likewise, any locals possibly defined by them we mark as address-exposed.
- fgMutateGcHeap(tree DEBUGARG("dynamic block store"));
-
- GenTreeStoreDynBlk* store = tree->AsStoreDynBlk();
- ValueNumPair vnpExcSet = ValueNumStore::VNPForEmptyExcSet();
-
- // Propagate the exceptions...
- vnpExcSet = vnStore->VNPUnionExcSet(store->Addr()->gtVNPair, vnpExcSet);
- vnpExcSet = vnStore->VNPUnionExcSet(store->Data()->gtVNPair, vnpExcSet);
- vnpExcSet = vnStore->VNPUnionExcSet(store->gtDynamicSize->gtVNPair, vnpExcSet);
-
- // This is a store, it produces no value. Thus we use VNPForVoid().
- store->gtVNPair = vnStore->VNPWithExc(vnStore->VNPForVoid(), vnpExcSet);
-
- // Note that we are only adding the exception for the destination address.
- // Currently, "Data()" is an explicit indirection in case this is a "cpblk".
- assert(store->Data()->gtEffectiveVal()->OperIsIndir() || store->OperIsInitBlkOp());
- fgValueNumberAddExceptionSetForIndirection(store, store->Addr());
- break;
- }
-
case GT_CMPXCHG: // Specialop
{
// For CMPXCHG and other intrinsics add an arbitrary side effect on GcHeap/ByrefExposed.
diff --git a/src/coreclr/nativeaot/Common/src/Internal/Runtime/CompilerHelpers/StartupCodeHelpers.cs b/src/coreclr/nativeaot/Common/src/Internal/Runtime/CompilerHelpers/StartupCodeHelpers.cs
index 42e677737be9..d47cae0a18ed 100644
--- a/src/coreclr/nativeaot/Common/src/Internal/Runtime/CompilerHelpers/StartupCodeHelpers.cs
+++ b/src/coreclr/nativeaot/Common/src/Internal/Runtime/CompilerHelpers/StartupCodeHelpers.cs
@@ -290,12 +290,7 @@ namespace Internal.Runtime.CompilerHelpers
{
// At the time of writing this, 90% of DehydratedDataCommand.Copy cases
// would fall into the above specialized cases. 10% fall back to memmove.
- memmove(pDest, pCurrent, (nuint)payload);
-
- // Not a DllImport - we don't need a GC transition since this is early startup
- [MethodImplAttribute(MethodImplOptions.InternalCall)]
- [RuntimeImport("*", "memmove")]
- static extern unsafe void* memmove(byte* dmem, byte* smem, nuint size);
+ Unsafe.CopyBlock(pDest, pCurrent, (uint)payload);
}
pDest += payload;
diff --git a/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifier.cs b/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifier.cs
index 4e8b3ed564db..a790e0aeb501 100644
--- a/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifier.cs
+++ b/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifier.cs
@@ -65,7 +65,7 @@ namespace System.Collections.Concurrent
{
protected ConcurrentUnifier()
{
- _lock = new Lock(useTrivialWaits: true);
+ _lock = new Lock();
_container = new Container(this);
}
diff --git a/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifierW.cs b/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifierW.cs
index efca6e2efaeb..69cfde1e48e3 100644
--- a/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifierW.cs
+++ b/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifierW.cs
@@ -75,7 +75,7 @@ namespace System.Collections.Concurrent
{
protected ConcurrentUnifierW()
{
- _lock = new Lock(useTrivialWaits: true);
+ _lock = new Lock();
_container = new Container(this);
}
diff --git a/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifierWKeyed.cs b/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifierWKeyed.cs
index 7cd63314c35e..9ce60a515185 100644
--- a/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifierWKeyed.cs
+++ b/src/coreclr/nativeaot/Common/src/System/Collections/Concurrent/ConcurrentUnifierWKeyed.cs
@@ -84,7 +84,7 @@ namespace System.Collections.Concurrent
{
protected ConcurrentUnifierWKeyed()
{
- _lock = new Lock(useTrivialWaits: true);
+ _lock = new Lock();
_container = new Container(this);
}
diff --git a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/CompilerServices/Unsafe.cs b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/CompilerServices/Unsafe.cs
index 136872edd4a4..7f3df7a7893a 100644
--- a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/CompilerServices/Unsafe.cs
+++ b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/CompilerServices/Unsafe.cs
@@ -119,5 +119,15 @@ namespace System.Runtime.CompilerServices
{
throw new PlatformNotSupportedException();
}
+
+ /// <summary>
+ /// Copies bytes from the source address to the destination address.
+ /// </summary>
+ [Intrinsic]
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public static void CopyBlock(void* destination, void* source, uint byteCount)
+ {
+ throw new PlatformNotSupportedException();
+ }
}
}
diff --git a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/ExceptionHandling.cs b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/ExceptionHandling.cs
index c8d9a0a74c26..89855a54113d 100644
--- a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/ExceptionHandling.cs
+++ b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/ExceptionHandling.cs
@@ -68,6 +68,14 @@ namespace System.Runtime
private IntPtr _dummy; // For alignment
}
+ internal struct MethodRegionInfo
+ {
+ internal byte* _hotStartAddress;
+ internal nuint _hotSize;
+ internal byte* _coldStartAddress;
+ internal nuint _coldSize;
+ }
+
#pragma warning disable IDE0060
// This is a fail-fast function used by the runtime as a last resort that will terminate the process with
// as little effort as possible. No guarantee is made about the semantics of this fail-fast.
@@ -932,6 +940,20 @@ namespace System.Runtime
"Handling frame must have a valid stack frame pointer");
}
+ // Calculate the code offset from the start of the method as if the hot and cold regions were
+ // stored sequentially in memory.
+ private static uint CalculateCodeOffset(byte* pbControlPC, in MethodRegionInfo methodRegionInfo)
+ {
+ uint codeOffset = (uint)(pbControlPC - methodRegionInfo._hotStartAddress);
+ // If the PC is in the cold region, adjust the offset to be relative to the start of the method.
+ if ((methodRegionInfo._coldSize != 0) && (codeOffset >= methodRegionInfo._hotSize))
+ {
+ codeOffset = (uint)(methodRegionInfo._hotSize + (nuint)(pbControlPC - methodRegionInfo._coldStartAddress));
+ }
+
+ return codeOffset;
+ }
+
private static void UpdateStackTrace(object exceptionObj, UIntPtr curFramePtr, IntPtr ip, UIntPtr sp,
ref bool isFirstRethrowFrame, ref UIntPtr prevFramePtr, ref bool isFirstFrame, ref ExInfo exInfo)
{
@@ -958,13 +980,13 @@ namespace System.Runtime
tryRegionIdx = MaxTryRegionIdx;
EHEnum ehEnum;
- byte* pbMethodStartAddress;
- if (!InternalCalls.RhpEHEnumInitFromStackFrameIterator(ref frameIter, &pbMethodStartAddress, &ehEnum))
+ MethodRegionInfo methodRegionInfo;
+ if (!InternalCalls.RhpEHEnumInitFromStackFrameIterator(ref frameIter, out methodRegionInfo, &ehEnum))
return false;
byte* pbControlPC = frameIter.ControlPC;
- uint codeOffset = (uint)(pbControlPC - pbMethodStartAddress);
+ uint codeOffset = CalculateCodeOffset(pbControlPC, in methodRegionInfo);
uint lastTryStart = 0, lastTryEnd = 0;
@@ -1111,13 +1133,14 @@ namespace System.Runtime
private static void InvokeSecondPass(ref ExInfo exInfo, uint idxStart, uint idxLimit)
{
EHEnum ehEnum;
- byte* pbMethodStartAddress;
- if (!InternalCalls.RhpEHEnumInitFromStackFrameIterator(ref exInfo._frameIter, &pbMethodStartAddress, &ehEnum))
+ MethodRegionInfo methodRegionInfo;
+
+ if (!InternalCalls.RhpEHEnumInitFromStackFrameIterator(ref exInfo._frameIter, out methodRegionInfo, &ehEnum))
return;
byte* pbControlPC = exInfo._frameIter.ControlPC;
- uint codeOffset = (uint)(pbControlPC - pbMethodStartAddress);
+ uint codeOffset = CalculateCodeOffset(pbControlPC, in methodRegionInfo);
uint lastTryStart = 0, lastTryEnd = 0;
diff --git a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs
index 806208187e92..8cf281392abf 100644
--- a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs
+++ b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs
@@ -177,7 +177,7 @@ namespace System.Runtime
[RuntimeImport(Redhawk.BaseName, "RhpEHEnumInitFromStackFrameIterator")]
[MethodImpl(MethodImplOptions.InternalCall)]
- internal static extern unsafe bool RhpEHEnumInitFromStackFrameIterator(ref StackFrameIterator pFrameIter, byte** pMethodStartAddress, void* pEHEnum);
+ internal static extern unsafe bool RhpEHEnumInitFromStackFrameIterator(ref StackFrameIterator pFrameIter, out EH.MethodRegionInfo pMethodRegionInfo, void* pEHEnum);
[RuntimeImport(Redhawk.BaseName, "RhpEHEnumNext")]
[MethodImpl(MethodImplOptions.InternalCall)]
diff --git a/src/coreclr/nativeaot/Runtime/CMakeLists.txt b/src/coreclr/nativeaot/Runtime/CMakeLists.txt
index 7ef6f223fdb5..a5e865ff749d 100644
--- a/src/coreclr/nativeaot/Runtime/CMakeLists.txt
+++ b/src/coreclr/nativeaot/Runtime/CMakeLists.txt
@@ -263,11 +263,13 @@ add_definitions(-D_LIB)
# there is a problem with undefined symbols when this is set
# add_definitions(-DSTRESS_HEAP)
-if(WIN32)
+if(CLR_CMAKE_TARGET_WIN32)
set(FEATURE_ETW 1)
add_definitions(-DFEATURE_ETW)
add_definitions(-DFEATURE_SUSPEND_REDIRECTION)
- add_definitions(-DFEATURE_SPECIAL_USER_MODE_APC)
+ if (CLR_CMAKE_TARGET_ARCH_AMD64)
+ add_definitions(-DFEATURE_SPECIAL_USER_MODE_APC)
+ endif()
else()
if(NOT CLR_CMAKE_TARGET_MACCATALYST AND NOT CLR_CMAKE_TARGET_IOS AND NOT CLR_CMAKE_TARGET_TVOS)
add_definitions(-DFEATURE_READONLY_GS_COOKIE)
diff --git a/src/coreclr/nativeaot/Runtime/EHHelpers.cpp b/src/coreclr/nativeaot/Runtime/EHHelpers.cpp
index 28cb9e617f09..1a54b9bcc9b5 100644
--- a/src/coreclr/nativeaot/Runtime/EHHelpers.cpp
+++ b/src/coreclr/nativeaot/Runtime/EHHelpers.cpp
@@ -29,13 +29,25 @@
#include "MethodTable.inl"
#include "CommonMacros.inl"
+struct MethodRegionInfo
+{
+ void* hotStartAddress;
+ size_t hotSize;
+ void* coldStartAddress;
+ size_t coldSize;
+};
+
COOP_PINVOKE_HELPER(FC_BOOL_RET, RhpEHEnumInitFromStackFrameIterator, (
- StackFrameIterator* pFrameIter, void ** pMethodStartAddressOut, EHEnum* pEHEnum))
+ StackFrameIterator* pFrameIter, MethodRegionInfo* pMethodRegionInfoOut, EHEnum* pEHEnum))
{
ICodeManager * pCodeManager = pFrameIter->GetCodeManager();
pEHEnum->m_pCodeManager = pCodeManager;
- FC_RETURN_BOOL(pCodeManager->EHEnumInit(pFrameIter->GetMethodInfo(), pMethodStartAddressOut, &pEHEnum->m_state));
+ pMethodRegionInfoOut->hotSize = 0; // unknown
+ pMethodRegionInfoOut->coldStartAddress = nullptr;
+ pMethodRegionInfoOut->coldSize = 0;
+
+ FC_RETURN_BOOL(pCodeManager->EHEnumInit(pFrameIter->GetMethodInfo(), &pMethodRegionInfoOut->hotStartAddress, &pEHEnum->m_state));
}
COOP_PINVOKE_HELPER(FC_BOOL_RET, RhpEHEnumNext, (EHEnum* pEHEnum, EHClause* pEHClause))
diff --git a/src/coreclr/nativeaot/Runtime/MathHelpers.cpp b/src/coreclr/nativeaot/Runtime/MathHelpers.cpp
index 1d7330567916..2d743dcb3fef 100644
--- a/src/coreclr/nativeaot/Runtime/MathHelpers.cpp
+++ b/src/coreclr/nativeaot/Runtime/MathHelpers.cpp
@@ -132,11 +132,6 @@ EXTERN_C NATIVEAOT_API int64_t REDHAWK_CALLCONV RhpLMul(int64_t i, int64_t j)
return i * j;
}
-EXTERN_C NATIVEAOT_API uint64_t REDHAWK_CALLCONV RhpULMul(uint64_t i, uint64_t j)
-{
- return i * j;
-}
-
EXTERN_C NATIVEAOT_API uint64_t REDHAWK_CALLCONV RhpLRsz(uint64_t i, int32_t j)
{
return i >> (j & 0x3f);
diff --git a/src/coreclr/nativeaot/Runtime/arm64/AllocFast.S b/src/coreclr/nativeaot/Runtime/arm64/AllocFast.S
index 79ffed2b0521..966b052a2b9f 100644
--- a/src/coreclr/nativeaot/Runtime/arm64/AllocFast.S
+++ b/src/coreclr/nativeaot/Runtime/arm64/AllocFast.S
@@ -90,9 +90,11 @@ LOCAL_LABEL(RhpNewFast_RarePath):
// Set the new objects MethodTable pointer on success.
cbz x0, LOCAL_LABEL(NewOutOfMemory)
+ .cfi_remember_state
POP_COOP_PINVOKE_FRAME
EPILOG_RETURN
+ .cfi_restore_state
LOCAL_LABEL(NewOutOfMemory):
// This is the OOM failure path. We are going to tail-call to a managed helper that will throw
// an out of memory exception that the caller of this allocator understands.
@@ -262,9 +264,11 @@ LOCAL_LABEL(RhpNewArray_Rare):
// Set the new objects MethodTable pointer and length on success.
cbz x0, LOCAL_LABEL(ArrayOutOfMemory)
+ .cfi_remember_state
POP_COOP_PINVOKE_FRAME
EPILOG_RETURN
+ .cfi_restore_state
LOCAL_LABEL(ArrayOutOfMemory):
// This is the OOM failure path. We are going to tail-call to a managed helper that will throw
// an out of memory exception that the caller of this allocator understands.
diff --git a/src/coreclr/nativeaot/Runtime/arm64/GcProbe.S b/src/coreclr/nativeaot/Runtime/arm64/GcProbe.S
index abe7555b7611..8075335ea0b2 100644
--- a/src/coreclr/nativeaot/Runtime/arm64/GcProbe.S
+++ b/src/coreclr/nativeaot/Runtime/arm64/GcProbe.S
@@ -146,8 +146,11 @@ NESTED_ENTRY RhpWaitForGC, _TEXT, NoHandler
ldr x2, [sp, #OFFSETOF__PInvokeTransitionFrame__m_Flags]
tbnz x2, #PTFF_THREAD_ABORT_BIT, LOCAL_LABEL(ThrowThreadAbort)
+ .cfi_remember_state
POP_PROBE_FRAME
EPILOG_RETURN
+
+ .cfi_restore_state
LOCAL_LABEL(ThrowThreadAbort):
POP_PROBE_FRAME
mov w0, #STATUS_REDHAWK_THREAD_ABORT
diff --git a/src/coreclr/nativeaot/Runtime/inc/ModuleHeaders.h b/src/coreclr/nativeaot/Runtime/inc/ModuleHeaders.h
index 750faccc8283..6a3b24a39448 100644
--- a/src/coreclr/nativeaot/Runtime/inc/ModuleHeaders.h
+++ b/src/coreclr/nativeaot/Runtime/inc/ModuleHeaders.h
@@ -12,7 +12,7 @@ struct ReadyToRunHeaderConstants
static const uint32_t Signature = 0x00525452; // 'RTR'
static const uint32_t CurrentMajorVersion = 9;
- static const uint32_t CurrentMinorVersion = 1;
+ static const uint32_t CurrentMinorVersion = 2;
};
struct ReadyToRunHeader
diff --git a/src/coreclr/nativeaot/Runtime/windows/PalRedhawkMinWin.cpp b/src/coreclr/nativeaot/Runtime/windows/PalRedhawkMinWin.cpp
index 06706ed1e8a4..2842caf78ef6 100644
--- a/src/coreclr/nativeaot/Runtime/windows/PalRedhawkMinWin.cpp
+++ b/src/coreclr/nativeaot/Runtime/windows/PalRedhawkMinWin.cpp
@@ -543,7 +543,7 @@ REDHAWK_PALEXPORT void REDHAWK_PALAPI PalHijack(HANDLE hThread, _In_opt_ void* p
pThread->SetActivationPending(false);
DWORD lastError = GetLastError();
- if (lastError != ERROR_INVALID_PARAMETER)
+ if (lastError != ERROR_INVALID_PARAMETER && lastError != ERROR_NOT_SUPPORTED)
{
// An unexpected failure has happened. It is a concern.
ASSERT_UNCONDITIONALLY("Failed to queue an APC for unusual reason.");
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/CompatibilitySuppressions.xml b/src/coreclr/nativeaot/System.Private.CoreLib/src/CompatibilitySuppressions.xml
index 9d09ab7e1059..43220163b098 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/CompatibilitySuppressions.xml
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/CompatibilitySuppressions.xml
@@ -834,10 +834,6 @@
<Target>M:System.Reflection.MethodBase.GetParametersAsSpan</Target>
</Suppression>
<Suppression>
- <DiagnosticId>CP0002</DiagnosticId>
- <Target>M:System.Threading.Lock.#ctor(System.Boolean)</Target>
- </Suppression>
- <Suppression>
<DiagnosticId>CP0015</DiagnosticId>
<Target>M:System.Diagnostics.Tracing.EventSource.Write``1(System.String,``0):[T:System.Diagnostics.CodeAnalysis.RequiresUnreferencedCodeAttribute]</Target>
</Suppression>
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/Internal/Runtime/FrozenObjectHeapManager.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/Internal/Runtime/FrozenObjectHeapManager.cs
index 81f1eec9cfdf..d81a5409bf42 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/Internal/Runtime/FrozenObjectHeapManager.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/Internal/Runtime/FrozenObjectHeapManager.cs
@@ -16,7 +16,7 @@ namespace Internal.Runtime
{
public static readonly FrozenObjectHeapManager Instance = new FrozenObjectHeapManager();
- private readonly Lock m_Crst = new Lock(useTrivialWaits: true);
+ private readonly LowLevelLock m_Crst = new LowLevelLock();
private FrozenObjectSegment m_CurrentSegment;
// Default size to reserve for a frozen segment
@@ -34,7 +34,9 @@ namespace Internal.Runtime
{
HalfBakedObject* obj = null;
- using (m_Crst.EnterScope())
+ m_Crst.Acquire();
+
+ try
{
Debug.Assert(type != null);
// _ASSERT(FOH_COMMIT_SIZE >= MIN_OBJECT_SIZE);
@@ -82,6 +84,10 @@ namespace Internal.Runtime
Debug.Assert(obj != null);
}
} // end of m_Crst lock
+ finally
+ {
+ m_Crst.Release();
+ }
IntPtr result = (IntPtr)obj;
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System.Private.CoreLib.csproj b/src/coreclr/nativeaot/System.Private.CoreLib/src/System.Private.CoreLib.csproj
index 4ca91458c70e..86ce820851c1 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System.Private.CoreLib.csproj
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System.Private.CoreLib.csproj
@@ -184,7 +184,6 @@
<Compile Include="System\Math.NativeAot.cs" />
<Compile Include="System\MathF.NativeAot.cs" />
<Compile Include="System\Object.NativeAot.cs" />
- <Compile Include="System\Resources\ManifestBasedResourceGroveler.NativeAot.cs" />
<Compile Include="System\RuntimeArgumentHandle.cs" />
<Compile Include="System\RuntimeType.cs" />
<Compile Include="System\Runtime\ControlledExecution.NativeAot.cs" />
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Array.NativeAot.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Array.NativeAot.cs
index 64ba6597446a..61f70e212483 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Array.NativeAot.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Array.NativeAot.cs
@@ -494,7 +494,7 @@ namespace System
// Copy scenario: ValueType-array to value-type array with no embedded gc-refs.
nuint elementSize = sourceArray.ElementSize;
- Buffer.Memmove(
+ SpanHelpers.Memmove(
ref Unsafe.AddByteOffset(ref MemoryMarshal.GetArrayDataReference(destinationArray), (nuint)destinationIndex * elementSize),
ref Unsafe.AddByteOffset(ref MemoryMarshal.GetArrayDataReference(sourceArray), (nuint)sourceIndex * elementSize),
elementSize * (nuint)length);
@@ -534,7 +534,7 @@ namespace System
if (sourceElementType == destElementType)
{
// Multidim arrays and enum->int copies can still reach this path.
- Buffer.Memmove(ref *data, ref *srcData, (nuint)length * srcElementSize);
+ SpanHelpers.Memmove(ref *data, ref *srcData, (nuint)length * srcElementSize);
return;
}
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Object.NativeAot.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Object.NativeAot.cs
index 727fbc9fbfdd..9f8dbe11a212 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Object.NativeAot.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Object.NativeAot.cs
@@ -41,7 +41,7 @@ namespace System
if (this.GetMethodTable()->ContainsGCPointers)
Buffer.BulkMoveWithWriteBarrier(ref dst, ref src, byteCount);
else
- Buffer.Memmove(ref dst, ref src, byteCount);
+ SpanHelpers.Memmove(ref dst, ref src, byteCount);
return clone;
}
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/Runtime/MethodInfos/CustomMethodInvoker.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/Runtime/MethodInfos/CustomMethodInvoker.cs
index fbf56e6e4603..f2c15681c288 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/Runtime/MethodInfos/CustomMethodInvoker.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/Runtime/MethodInfos/CustomMethodInvoker.cs
@@ -36,7 +36,7 @@ namespace System.Reflection.Runtime.MethodInfos
if (!(thisObject == null && 0 != (_options & InvokerOptions.AllowNullThis)))
ValidateThis(thisObject, _thisType.TypeHandle);
- int argCount = (arguments != null) ? arguments.Length : 0;
+ int argCount = arguments.Length;
if (argCount != _parameterTypes.Length)
throw new TargetParameterCountException();
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/RuntimeAssembly.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/RuntimeAssembly.cs
index 24a10f91faa7..9587d6f2d67f 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/RuntimeAssembly.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/RuntimeAssembly.cs
@@ -1,10 +1,34 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
+using System.Globalization;
+
+using Internal.Reflection.Augments;
+
namespace System.Reflection
{
// Base class for runtime implemented Assembly
public abstract class RuntimeAssembly : Assembly
{
+ internal static Assembly? InternalGetSatelliteAssembly(Assembly mainAssembly, CultureInfo culture, Version? version, bool throwOnFileNotFound)
+ {
+ AssemblyName mainAssemblyAn = mainAssembly.GetName();
+ AssemblyName an = new AssemblyName();
+
+ an.CultureInfo = culture;
+ an.Name = mainAssemblyAn.Name + ".resources";
+ an.SetPublicKeyToken(mainAssemblyAn.GetPublicKeyToken());
+ an.Flags = mainAssemblyAn.Flags;
+ an.Version = version ?? mainAssemblyAn.Version;
+
+ Assembly? retAssembly = ReflectionAugments.ReflectionCoreCallbacks.Load(an, throwOnFileNotFound);
+
+ if (retAssembly == mainAssembly)
+ {
+ retAssembly = null;
+ }
+
+ return retAssembly;
+ }
}
}
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Resources/ManifestBasedResourceGroveler.NativeAot.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Resources/ManifestBasedResourceGroveler.NativeAot.cs
deleted file mode 100644
index f16c1ba38923..000000000000
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Resources/ManifestBasedResourceGroveler.NativeAot.cs
+++ /dev/null
@@ -1,41 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System.Collections.Generic;
-using System.Diagnostics;
-using System.Globalization;
-using System.IO;
-using System.Reflection;
-using System.Text;
-
-using Internal.Reflection.Augments;
-
-namespace System.Resources
-{
- internal partial class ManifestBasedResourceGroveler
- {
- // Internal version of GetSatelliteAssembly that avoids throwing FileNotFoundException
- private static Assembly? InternalGetSatelliteAssembly(Assembly mainAssembly,
- CultureInfo culture,
- Version? version)
- {
- AssemblyName mainAssemblyAn = mainAssembly.GetName();
- AssemblyName an = new AssemblyName();
-
- an.CultureInfo = culture;
- an.Name = mainAssemblyAn.Name + ".resources";
- an.SetPublicKeyToken(mainAssemblyAn.GetPublicKeyToken());
- an.Flags = mainAssemblyAn.Flags;
- an.Version = version ?? mainAssemblyAn.Version;
-
- Assembly? retAssembly = ReflectionAugments.ReflectionCoreCallbacks.Load(an, false);
-
- if (retAssembly == mainAssembly)
- {
- retAssembly = null;
- }
-
- return retAssembly;
- }
- }
-}
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/CompilerServices/ClassConstructorRunner.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/CompilerServices/ClassConstructorRunner.cs
index c3baa5a7dad0..737a3db6c598 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/CompilerServices/ClassConstructorRunner.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/CompilerServices/ClassConstructorRunner.cs
@@ -275,7 +275,7 @@ namespace System.Runtime.CompilerServices
#if TARGET_WASM
if (s_cctorGlobalLock == null)
{
- Interlocked.CompareExchange(ref s_cctorGlobalLock, new Lock(useTrivialWaits: true), null);
+ Interlocked.CompareExchange(ref s_cctorGlobalLock, new Lock(), null);
}
if (s_cctorArrays == null)
{
@@ -342,7 +342,7 @@ namespace System.Runtime.CompilerServices
Debug.Assert(resultArray[resultIndex]._pContext == default(StaticClassConstructionContext*));
resultArray[resultIndex]._pContext = pContext;
- resultArray[resultIndex].Lock = new Lock(useTrivialWaits: true);
+ resultArray[resultIndex].Lock = new Lock();
s_count++;
}
@@ -489,7 +489,7 @@ namespace System.Runtime.CompilerServices
internal static void Initialize()
{
s_cctorArrays = new Cctor[10][];
- s_cctorGlobalLock = new Lock(useTrivialWaits: true);
+ s_cctorGlobalLock = new Lock();
}
[Conditional("ENABLE_NOISY_CCTOR_LOG")]
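
The WASM branch above publishes the global lock with Interlocked.CompareExchange so racing initializers stay benign: several threads may allocate a candidate, but exactly one instance is ever installed and the losers' allocations are discarded. A standalone sketch of that publication pattern (names are illustrative):

    using System.Threading;

    static class LazyLockDemo
    {
        private static object? s_lock;

        static object GetLock()
        {
            // CompareExchange installs our instance only if the field is
            // still null; otherwise the already-published instance wins.
            if (s_lock == null)
                Interlocked.CompareExchange(ref s_lock, new object(), null);
            return s_lock!;
        }
    }
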
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/InteropServices/ComWrappers.NativeAot.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/InteropServices/ComWrappers.NativeAot.cs
index e06ac7457049..4d156c039422 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/InteropServices/ComWrappers.NativeAot.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/InteropServices/ComWrappers.NativeAot.cs
@@ -44,7 +44,7 @@ namespace System.Runtime.InteropServices
private static readonly List<GCHandle> s_referenceTrackerNativeObjectWrapperCache = new List<GCHandle>();
private readonly ConditionalWeakTable<object, ManagedObjectWrapperHolder> _ccwTable = new ConditionalWeakTable<object, ManagedObjectWrapperHolder>();
- private readonly Lock _lock = new Lock(useTrivialWaits: true);
+ private readonly Lock _lock = new Lock();
private readonly Dictionary<IntPtr, GCHandle> _rcwCache = new Dictionary<IntPtr, GCHandle>();
internal static bool TryGetComInstanceForIID(object obj, Guid iid, out IntPtr unknown, out long wrapperId)
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshal.NativeAot.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshal.NativeAot.cs
index a3ccfc5a8c43..490997c1da90 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshal.NativeAot.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshal.NativeAot.cs
@@ -93,7 +93,7 @@ namespace System.Runtime.InteropServices
{
nuint size = (nuint)RuntimeInteropData.GetStructUnsafeStructSize(structureTypeHandle);
- Buffer.Memmove(ref structure.GetRawData(), ref *(byte*)ptr, size);
+ SpanHelpers.Memmove(ref structure.GetRawData(), ref *(byte*)ptr, size);
}
}
@@ -180,7 +180,7 @@ namespace System.Runtime.InteropServices
{
nuint size = (nuint)RuntimeInteropData.GetStructUnsafeStructSize(structureTypeHandle);
- Buffer.Memmove(ref *(byte*)ptr, ref structure.GetRawData(), size);
+ SpanHelpers.Memmove(ref *(byte*)ptr, ref structure.GetRawData(), size);
}
}
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Condition.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Condition.cs
index 1c6cdadaf022..70843a3b940f 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Condition.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Condition.cs
@@ -114,7 +114,6 @@ namespace System.Threading
success =
waiter.ev.WaitOneNoCheck(
millisecondsTimeout,
- false, // useTrivialWaits
associatedObjectForMonitorWait,
associatedObjectForMonitorWait != null
? NativeRuntimeEventSource.WaitHandleWaitSourceMap.MonitorWait
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Lock.NativeAot.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Lock.NativeAot.cs
index 78fc77454019..318a8cc76802 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Lock.NativeAot.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Lock.NativeAot.cs
@@ -92,18 +92,6 @@ namespace System.Threading
_recursionCount = previousRecursionCount;
}
- private static bool IsFullyInitialized
- {
- get
- {
- // If NativeRuntimeEventSource is already being class-constructed by this thread earlier in the stack, Log can
- // be null. This property is used to avoid going down the wait path in that case to avoid null checks in several
- // other places.
- Debug.Assert((StaticsInitializationStage)s_staticsInitializationStage == StaticsInitializationStage.Complete);
- return NativeRuntimeEventSource.Log != null;
- }
- }
-
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private TryLockResult LazyInitializeOrEnter()
{
@@ -113,10 +101,6 @@ namespace System.Threading
case StaticsInitializationStage.Complete:
if (_spinCount == SpinCountNotInitialized)
{
- if (!IsFullyInitialized)
- {
- goto case StaticsInitializationStage.Started;
- }
_spinCount = s_maxSpinCount;
}
return TryLockResult.Spin;
@@ -137,7 +121,7 @@ namespace System.Threading
}
stage = (StaticsInitializationStage)Volatile.Read(ref s_staticsInitializationStage);
- if (stage == StaticsInitializationStage.Complete && IsFullyInitialized)
+ if (stage == StaticsInitializationStage.Complete)
{
goto case StaticsInitializationStage.Complete;
}
@@ -182,17 +166,14 @@ namespace System.Threading
return true;
}
- bool isFullyInitialized;
try
{
s_isSingleProcessor = Environment.IsSingleProcessor;
s_maxSpinCount = DetermineMaxSpinCount();
s_minSpinCount = DetermineMinSpinCount();
- // Also initialize some types that are used later to prevent potential class construction cycles. If
- // NativeRuntimeEventSource is already being class-constructed by this thread earlier in the stack, Log can be
- // null. Avoid going down the wait path in that case to avoid null checks in several other places.
- isFullyInitialized = NativeRuntimeEventSource.Log != null;
+ // Also initialize some types that are used later to prevent potential class construction cycles
+ _ = NativeRuntimeEventSource.Log;
}
catch
{
@@ -201,7 +182,7 @@ namespace System.Threading
}
Volatile.Write(ref s_staticsInitializationStage, (int)StaticsInitializationStage.Complete);
- return isFullyInitialized;
+ return true;
}
// Returns false until the static variable is lazy-initialized
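
With useTrivialWaits and IsFullyInitialized gone, lazy initialization reduces to the staged pattern visible above: read the stage with Volatile.Read, let one thread do the work, then publish Complete with Volatile.Write so the computed values are visible before the flag is. A simplified sketch of that shape (the enum, fields, and spin-count value are illustrative):

    using System;
    using System.Threading;

    static class StagedInitDemo
    {
        private enum Stage { NotStarted, Started, Complete }

        private static int s_stage = (int)Stage.NotStarted;
        private static int s_maxSpinCount;

        // Returns true once initialization is complete.
        static bool TryInitialize()
        {
            // Only one thread wins the NotStarted -> Started transition.
            if (Interlocked.CompareExchange(ref s_stage,
                    (int)Stage.Started, (int)Stage.NotStarted) != (int)Stage.NotStarted)
            {
                // Another thread is (or was) initializing; report its progress.
                return (Stage)Volatile.Read(ref s_stage) == Stage.Complete;
            }

            s_maxSpinCount = Environment.ProcessorCount > 1 ? 20 : 0;

            // Publish the computed statics before the Complete flag is visible.
            Volatile.Write(ref s_stage, (int)Stage.Complete);
            return true;
        }
    }
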
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/SyncTable.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/SyncTable.cs
index c3a273d32739..02d7b4167ca6 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/SyncTable.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/SyncTable.cs
@@ -65,7 +65,7 @@ namespace System.Threading
/// <summary>
/// Protects all mutable operations on s_entries, s_freeEntryList, s_unusedEntryIndex. Also protects growing the table.
/// </summary>
- internal static readonly Lock s_lock = new Lock(useTrivialWaits: true);
+ internal static readonly Lock s_lock = new Lock();
/// <summary>
/// The dynamically growing array of sync entries.
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Thread.NativeAot.Windows.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Thread.NativeAot.Windows.cs
index d6ea54412e10..d0a48bed6c8d 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Thread.NativeAot.Windows.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Thread.NativeAot.Windows.cs
@@ -167,7 +167,7 @@ namespace System.Threading
}
else
{
- result = WaitHandle.WaitOneCore(waitHandle.DangerousGetHandle(), millisecondsTimeout, useTrivialWaits: false);
+ result = WaitHandle.WaitOneCore(waitHandle.DangerousGetHandle(), millisecondsTimeout);
}
return result == (int)Interop.Kernel32.WAIT_OBJECT_0;
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Thread.NativeAot.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Thread.NativeAot.cs
index 7fc526fc8347..5bdb703b4452 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Thread.NativeAot.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Thread.NativeAot.cs
@@ -31,7 +31,7 @@ namespace System.Threading
private Exception? _startException;
// Protects starting the thread and setting its priority
- private Lock _lock = new Lock(useTrivialWaits: true);
+ private Lock _lock = new Lock();
// This is used for a quick check on thread pool threads after running a work item to determine if the name, background
// state, or priority were changed by the work item, and if so to reset it. Other threads may also change some of those,
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Type.NativeAot.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Type.NativeAot.cs
index 9589edaab42b..4d9b2f3981b5 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Type.NativeAot.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Type.NativeAot.cs
@@ -31,14 +31,15 @@ namespace System
private static class AllocationLockHolder
{
- public static Lock AllocationLock = new Lock(useTrivialWaits: true);
+ public static LowLevelLock AllocationLock = new LowLevelLock();
}
[MethodImpl(MethodImplOptions.NoInlining)]
private static unsafe RuntimeType GetTypeFromMethodTableSlow(MethodTable* pMT)
{
// Allocate and set the RuntimeType under a lock - there's no way to free it if there is a race.
- using (AllocationLockHolder.AllocationLock.EnterScope())
+ AllocationLockHolder.AllocationLock.Acquire();
+ try
{
ref RuntimeType? runtimeTypeCache = ref Unsafe.AsRef<RuntimeType?>(pMT->WritableData);
if (runtimeTypeCache != null)
@@ -54,6 +55,10 @@ namespace System
return type;
}
+ finally
+ {
+ AllocationLockHolder.AllocationLock.Release();
+ }
}
//
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/ValueType.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/ValueType.cs
index e8340e411915..968e97c425cf 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/ValueType.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/ValueType.cs
@@ -95,43 +95,28 @@ namespace System
public override unsafe int GetHashCode()
{
- int hashCode = (int)this.GetMethodTable()->HashCode;
+ HashCode hashCode = default;
+ hashCode.Add((IntPtr)this.GetMethodTable());
- hashCode ^= GetHashCodeImpl();
-
- return hashCode;
- }
-
- private unsafe int GetHashCodeImpl()
- {
int numFields = __GetFieldHelper(GetNumFields, out _);
if (numFields == UseFastHelper)
- return FastGetValueTypeHashCodeHelper(this.GetMethodTable(), ref this.GetRawData());
+ hashCode.AddBytes(GetSpanForField(this.GetMethodTable(), ref this.GetRawData()));
+ else
+ RegularGetValueTypeHashCode(ref hashCode, ref this.GetRawData(), numFields);
- return RegularGetValueTypeHashCode(ref this.GetRawData(), numFields);
+ return hashCode.ToHashCode();
}
- private static unsafe int FastGetValueTypeHashCodeHelper(MethodTable* type, ref byte data)
+ private static unsafe ReadOnlySpan<byte> GetSpanForField(MethodTable* type, ref byte data)
{
// Sanity check - if there are GC references, we should not be hashing bytes
Debug.Assert(!type->ContainsGCPointers);
-
- int size = (int)type->ValueTypeSize;
- int hashCode = 0;
-
- for (int i = 0; i < size / 4; i++)
- {
- hashCode ^= Unsafe.As<byte, int>(ref Unsafe.Add(ref data, i * 4));
- }
-
- return hashCode;
+ return new ReadOnlySpan<byte>(ref data, (int)type->ValueTypeSize);
}
- private unsafe int RegularGetValueTypeHashCode(ref byte data, int numFields)
+ private unsafe void RegularGetValueTypeHashCode(ref HashCode hashCode, ref byte data, int numFields)
{
- int hashCode = 0;
-
// We only take the hashcode for the first non-null field. That's what the CLR does.
for (int i = 0; i < numFields; i++)
{
@@ -142,15 +127,15 @@ namespace System
if (fieldType->ElementType == EETypeElementType.Single)
{
- hashCode = Unsafe.As<byte, float>(ref fieldData).GetHashCode();
+ hashCode.Add(Unsafe.As<byte, float>(ref fieldData));
}
else if (fieldType->ElementType == EETypeElementType.Double)
{
- hashCode = Unsafe.As<byte, double>(ref fieldData).GetHashCode();
+ hashCode.Add(Unsafe.As<byte, double>(ref fieldData));
}
else if (fieldType->IsPrimitive)
{
- hashCode = FastGetValueTypeHashCodeHelper(fieldType, ref fieldData);
+ hashCode.AddBytes(GetSpanForField(fieldType, ref fieldData));
}
else if (fieldType->IsValueType)
{
@@ -164,7 +149,7 @@ namespace System
var fieldValue = (ValueType)RuntimeImports.RhBox(fieldType, ref fieldData);
if (fieldValue != null)
{
- hashCode = fieldValue.GetHashCodeImpl();
+ hashCode.Add(fieldValue);
}
else
{
@@ -177,7 +162,7 @@ namespace System
object fieldValue = Unsafe.As<byte, object>(ref fieldData);
if (fieldValue != null)
{
- hashCode = fieldValue.GetHashCode();
+ hashCode.Add(fieldValue);
}
else
{
@@ -187,8 +172,6 @@ namespace System
}
break;
}
-
- return hashCode;
}
}
}
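
The rewrite above drops the hand-rolled XOR folding in favor of the System.HashCode accumulator: Add for typed fields (so float and double still hash through their own GetHashCode) and AddBytes for reference-free primitive payloads. The same accumulation API in a standalone value type (Point is illustrative; the typeof seed stands in for the MethodTable pointer added above):

    using System;

    struct Point
    {
        public int X;
        public double Y;

        public override int GetHashCode()
        {
            HashCode hc = default;
            hc.Add(typeof(Point)); // seed with a type marker
            hc.Add(X);             // fold in each field
            hc.Add(Y);
            return hc.ToHashCode();
        }
    }
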
diff --git a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.ConstructedGenericsRegistration.cs b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.ConstructedGenericsRegistration.cs
index 72c65865ff2a..cebd5d917896 100644
--- a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.ConstructedGenericsRegistration.cs
+++ b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.ConstructedGenericsRegistration.cs
@@ -24,7 +24,7 @@ namespace Internal.Runtime.TypeLoader
}
// To keep the synchronization simple, we execute all dynamic generic type registration/lookups under a global lock
- private Lock _dynamicGenericsLock = new Lock(useTrivialWaits: true);
+ private Lock _dynamicGenericsLock = new Lock();
internal void RegisterDynamicGenericTypesAndMethods(DynamicGenericsRegistrationData registrationData)
{
diff --git a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.StaticsLookup.cs b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.StaticsLookup.cs
index 1070b50fd75b..e442bfa940ea 100644
--- a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.StaticsLookup.cs
+++ b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.StaticsLookup.cs
@@ -15,7 +15,7 @@ namespace Internal.Runtime.TypeLoader
public sealed partial class TypeLoaderEnvironment
{
// To keep the synchronization simple, we execute all TLS registration/lookups under a global lock
- private Lock _threadStaticsLock = new Lock(useTrivialWaits: true);
+ private Lock _threadStaticsLock = new Lock();
// Counter to keep track of generated offsets for TLS cells of dynamic types.
private LowLevelDictionary<IntPtr, uint> _maxThreadLocalIndex = new LowLevelDictionary<IntPtr, uint>();
diff --git a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.cs b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.cs
index 42864c10f2eb..a3f0ebeaf1fc 100644
--- a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.cs
+++ b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.cs
@@ -145,7 +145,7 @@ namespace Internal.Runtime.TypeLoader
}
// To keep the synchronization simple, we execute all type loading under a global lock
- private Lock _typeLoaderLock = new Lock(useTrivialWaits: true);
+ private Lock _typeLoaderLock = new Lock();
public void VerifyTypeLoaderLockHeld()
{
diff --git a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeSystemContextFactory.cs b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeSystemContextFactory.cs
index 410296beaf0b..7f3037fc9a0c 100644
--- a/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeSystemContextFactory.cs
+++ b/src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeSystemContextFactory.cs
@@ -18,7 +18,7 @@ namespace Internal.Runtime.TypeLoader
// This allows us to avoid recreating the type resolution context again and again, but still allows it to go away once the types are no longer being built
private static GCHandle s_cachedContext = GCHandle.Alloc(null, GCHandleType.Weak);
- private static Lock s_lock = new Lock(useTrivialWaits: true);
+ private static Lock s_lock = new Lock();
public static TypeSystemContext Create()
{
diff --git a/src/coreclr/runtime.proj b/src/coreclr/runtime.proj
index a6442c16173e..773b0290d523 100644
--- a/src/coreclr/runtime.proj
+++ b/src/coreclr/runtime.proj
@@ -1,7 +1,6 @@
<Project Sdk="Microsoft.Build.NoTargets">
<PropertyGroup>
- <NativeBuildPartitionPropertiesToRemove>ClrFullNativeBuild;ClrRuntimeSubset;ClrJitSubset;ClrPalTestsSubset;ClrAllJitsSubset;ClrILToolsSubset;ClrNativeAotSubset;ClrSpmiSubset;ClrCrossComponentsSubset;ClrDebugSubset;HostArchitecture;PgoInstrument;NativeOptimizationDataSupported;CMakeArgs</NativeBuildPartitionPropertiesToRemove>
<_IcuDir Condition="'$(PkgMicrosoft_NETCore_Runtime_ICU_Transport)' != ''">$(PkgMicrosoft_NETCore_Runtime_ICU_Transport)/runtimes/$(TargetOS)-$(TargetArchitecture)$(_RuntimeVariant)/native</_IcuDir>
<_BuildNativeTargetOS>$(TargetOS)</_BuildNativeTargetOS>
diff --git a/src/coreclr/scripts/emitUnitTests.sh b/src/coreclr/scripts/emitUnitTests.sh
index 9738dc5c50e9..0d08f5bf8a49 100755
--- a/src/coreclr/scripts/emitUnitTests.sh
+++ b/src/coreclr/scripts/emitUnitTests.sh
@@ -109,6 +109,7 @@ cut -f 3- -d ' ' $output_dir/capstone_output.txt \
if [ -n "$verbose" ]; then
egrep "$verbose" $output_dir/clr_instrs.txt
+ egrep "$verbose" $output_dir/capstone_output.txt
else
(head -n 5; tail -n 5) < $output_dir/clr_instrs.txt
fi
diff --git a/src/coreclr/scripts/superpmi.py b/src/coreclr/scripts/superpmi.py
index 1d48affaac20..e5f1fe30adc8 100644
--- a/src/coreclr/scripts/superpmi.py
+++ b/src/coreclr/scripts/superpmi.py
@@ -508,7 +508,8 @@ def create_artifacts_base_name(coreclr_args, mch_file):
def read_csv(path):
with open(path, encoding="utf-8") as csv_file:
reader = csv.DictReader(csv_file)
- return list(reader)
+ for row in reader:
+ yield row
def decode_clrjit_build_string(clrjit_path):
""" Obtain information about the compiler that was used to compile the clrjit at the specified path.
@@ -1709,8 +1710,8 @@ class SuperPMIReplay:
command = [self.superpmi_path] + flags + [self.jit_path, mch_file]
(return_code, replay_output) = run_and_log_return_output(command)
- details = read_csv(details_info_file)
- print_superpmi_result(return_code, self.coreclr_args, self.aggregate_replay_metrics(details), None)
+ replay_metrics = self.aggregate_replay_metrics(details_info_file)
+ print_superpmi_result(return_code, self.coreclr_args, replay_metrics, None)
if return_code != 0:
# Don't report as replay failure missing data (return code 3).
@@ -1751,8 +1752,8 @@ class SuperPMIReplay:
return result
- def aggregate_replay_metrics(self, details):
- """ Given the CSV details file output by SPMI for a replay aggregate the
+ def aggregate_replay_metrics(self, details_file):
+ """ Given a path to a CSV details file output by SPMI for a replay aggregate the
successes, misses and failures
Returns:
@@ -1762,7 +1763,7 @@ class SuperPMIReplay:
num_successes = 0
num_misses = 0
num_failures = 0
- for row in details:
+ for row in read_csv(details_file):
result = row["Result"]
if result == "Success":
num_successes += 1
@@ -1860,8 +1861,8 @@ class DetailsSection:
def __exit__(self, *args):
self.write_fh.write("\n\n</div></details>\n")
-def aggregate_diff_metrics(details):
- """ Given the CSV details file output by SPMI for a diff aggregate the metrics.
+def aggregate_diff_metrics(details_file):
+ """ Given the path to a CSV details file output by SPMI for a diff aggregate the metrics.
"""
base_minopts = {"Successful compiles": 0, "Missing compiles": 0, "Failing compiles": 0,
@@ -1873,7 +1874,9 @@ def aggregate_diff_metrics(details):
diff_minopts = base_minopts.copy()
diff_fullopts = base_minopts.copy()
- for row in details:
+ diffs = []
+
+ for row in read_csv(details_file):
base_result = row["Base result"]
if row["MinOpts"] == "True":
@@ -1926,6 +1929,7 @@ def aggregate_diff_metrics(details):
if row["Has diff"] == "True":
base_dict["Contexts with diffs"] += 1
diff_dict["Contexts with diffs"] += 1
+ diffs.append(row)
base_overall = base_minopts.copy()
for k in base_overall.keys():
@@ -1948,7 +1952,8 @@ def aggregate_diff_metrics(details):
d["Relative PerfScore Geomean (Diffs)"] = 1
return ({"Overall": base_overall, "MinOpts": base_minopts, "FullOpts": base_fullopts},
- {"Overall": diff_overall, "MinOpts": diff_minopts, "FullOpts": diff_fullopts})
+ {"Overall": diff_overall, "MinOpts": diff_minopts, "FullOpts": diff_fullopts},
+ diffs)
class SuperPMIReplayAsmDiffs:
@@ -2150,8 +2155,7 @@ class SuperPMIReplayAsmDiffs:
command = [self.superpmi_path] + flags + [self.base_jit_path, self.diff_jit_path, mch_file]
return_code = run_and_log(command)
- details = read_csv(detailed_info_file)
- (base_metrics, diff_metrics) = aggregate_diff_metrics(details)
+ (base_metrics, diff_metrics, diffs) = aggregate_diff_metrics(detailed_info_file)
print_superpmi_result(return_code, self.coreclr_args, base_metrics, diff_metrics)
artifacts_base_name = create_artifacts_base_name(self.coreclr_args, mch_file)
@@ -2171,7 +2175,6 @@ class SuperPMIReplayAsmDiffs:
repro_base_command_line = "{} {} {}".format(self.superpmi_path, " ".join(altjit_asm_diffs_flags), self.diff_jit_path)
save_repro_mc_files(temp_location, self.coreclr_args, artifacts_base_name, repro_base_command_line)
- diffs = [r for r in details if r["Has diff"] == "True"]
if any(diffs):
files_with_asm_diffs.append(mch_file)
@@ -2922,8 +2925,7 @@ class SuperPMIReplayThroughputDiff:
command_string = " ".join(command)
logging.debug("'%s': Error return code: %s", command_string, return_code)
- details = read_csv(detailed_info_file)
- (base_metrics, diff_metrics) = aggregate_diff_metrics(details)
+ (base_metrics, diff_metrics, _) = aggregate_diff_metrics(detailed_info_file)
if base_metrics is not None and diff_metrics is not None:
base_instructions = base_metrics["Overall"]["Diff executed instructions"]
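
The Python-side change makes read_csv yield rows lazily instead of materializing the whole details file, and aggregate_diff_metrics now collects and returns the rows with diffs itself, so callers no longer re-scan a list that no longer exists. The same list-to-stream move, re-expressed in C# to match the rest of the examples in this document (names and the toy predicates are illustrative):

    using System.Collections.Generic;
    using System.IO;

    static class CsvStreamDemo
    {
        // Stream rows one at a time; nothing is held beyond the current line.
        static IEnumerable<string> ReadRows(string path)
        {
            using StreamReader reader = new(path);
            string? line;
            while ((line = reader.ReadLine()) != null)
                yield return line;
        }

        // Aggregate while streaming, keeping only the rows of interest.
        static (int Successes, List<string> Diffs) Aggregate(string path)
        {
            int successes = 0;
            var diffs = new List<string>();
            foreach (string row in ReadRows(path))
            {
                if (row.Contains("Success")) successes++; // toy predicate
                if (row.Contains("True")) diffs.Add(row); // toy predicate
            }
            return (successes, diffs);
        }
    }
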
diff --git a/src/coreclr/tools/Common/Compiler/DependencyAnalysis/Target_X86/X86Emitter.cs b/src/coreclr/tools/Common/Compiler/DependencyAnalysis/Target_X86/X86Emitter.cs
index e386e9a34da6..8601f35180e4 100644
--- a/src/coreclr/tools/Common/Compiler/DependencyAnalysis/Target_X86/X86Emitter.cs
+++ b/src/coreclr/tools/Common/Compiler/DependencyAnalysis/Target_X86/X86Emitter.cs
@@ -17,6 +17,37 @@ namespace ILCompiler.DependencyAnalysis.X86
public ObjectDataBuilder Builder;
public TargetRegisterMap TargetRegister;
+ public void EmitMOV(ref AddrMode addrMode, Register reg)
+ {
+ Debug.Assert(addrMode.Size != AddrModeSize.Int8 && addrMode.Size != AddrModeSize.Int16);
+ EmitIndirInstruction(0x89, (byte)reg, ref addrMode);
+ }
+
+ public void EmitMOV(Register reg, ref AddrMode addrMode)
+ {
+ Debug.Assert(addrMode.Size != AddrModeSize.Int8 && addrMode.Size != AddrModeSize.Int16);
+ EmitIndirInstruction(0x8B, (byte)reg, ref addrMode);
+ }
+
+ public void EmitMOV(ref AddrMode addrMode, ISymbolNode symbol)
+ {
+ if (symbol.RepresentsIndirectionCell)
+ {
+ throw new NotImplementedException();
+ }
+ else
+ {
+ EmitIndirInstruction(0xC7, (byte)0, ref addrMode);
+ Builder.EmitReloc(symbol, RelocType.IMAGE_REL_BASED_HIGHLOW);
+ }
+ }
+
+ public void EmitLEA(Register reg, ref AddrMode addrMode)
+ {
+ Debug.Assert(addrMode.Size != AddrModeSize.Int8 && addrMode.Size != AddrModeSize.Int16);
+ EmitIndirInstruction(0x8D, (byte)reg, ref addrMode);
+ }
+
public void EmitCMP(ref AddrMode addrMode, sbyte immediate)
{
if (addrMode.Size == AddrModeSize.Int16)
@@ -48,18 +79,55 @@ namespace ILCompiler.DependencyAnalysis.X86
}
}
+ public void EmitJE(ISymbolNode symbol)
+ {
+ if (symbol.RepresentsIndirectionCell)
+ {
+ throw new NotImplementedException();
+ }
+ else
+ {
+ Builder.EmitByte(0x0f);
+ Builder.EmitByte(0x84);
+ Builder.EmitReloc(symbol, RelocType.IMAGE_REL_BASED_REL32);
+ }
+ }
+
public void EmitXOR(Register register1, Register register2)
{
Builder.EmitByte(0x33);
Builder.EmitByte((byte)(0xC0 | ((byte)register1 << 3) | (byte)register2));
}
+ public void EmitZeroReg(Register reg)
+ {
+ EmitXOR(reg, reg);
+ }
+
+ public void EmitPOP(Register reg)
+ {
+ Builder.EmitByte((byte)(0x58 + (byte)reg));
+ }
+
+ public void EmitStackDup()
+ {
+ // PUSH [ESP]
+ Builder.EmitByte(0xff);
+ Builder.EmitByte(0x34);
+ Builder.EmitByte(0x24);
+ }
+
public void EmitPUSH(sbyte imm8)
{
Builder.EmitByte(0x6A);
Builder.EmitByte(unchecked((byte)imm8));
}
+ public void EmitPUSH(Register reg)
+ {
+ Builder.EmitByte((byte)(0x50 + (byte)reg));
+ }
+
public void EmitPUSH(ISymbolNode node)
{
if (node.RepresentsIndirectionCell)
@@ -103,6 +171,11 @@ namespace ILCompiler.DependencyAnalysis.X86
Builder.EmitByte(0xCC);
}
+ public void EmitJmpToAddrMode(ref AddrMode addrMode)
+ {
+ EmitIndirInstruction(0xFF, 0x4, ref addrMode);
+ }
+
public void EmitRET()
{
Builder.EmitByte(0xC3);
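
The new emitter methods are thin wrappers over classic x86 encodings: PUSH r32 and POP r32 fold the register number into a single opcode byte (0x50+reg, 0x58+reg), EmitStackDup emits FF 34 24 (push dword ptr [esp]), and EmitJmpToAddrMode uses the FF /4 indirect-jump form. A hedged sketch of the register-encoded forms (the Reg enum is an illustrative stand-in for ILCompiler's Register):

    static class X86OpcodeDemo
    {
        // x86 folds the register number into the opcode byte for the
        // one-byte PUSH/POP forms.
        enum Reg : byte { EAX = 0, ECX = 1, EDX = 2, EBX = 3, ESP = 4, EBP = 5, ESI = 6, EDI = 7 }

        static byte EncodePush(Reg r) => (byte)(0x50 + (byte)r); // push r32
        static byte EncodePop(Reg r) => (byte)(0x58 + (byte)r);  // pop r32

        // push dword ptr [esp]: opcode FF, ModRM 34 (/6, SIB), SIB 24 (base ESP).
        static byte[] EncodeStackDup() => new byte[] { 0xFF, 0x34, 0x24 };
    }
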
diff --git a/src/coreclr/tools/Common/Compiler/InstructionSetSupport.cs b/src/coreclr/tools/Common/Compiler/InstructionSetSupport.cs
index 1e7a392eca49..81e2adbc2003 100644
--- a/src/coreclr/tools/Common/Compiler/InstructionSetSupport.cs
+++ b/src/coreclr/tools/Common/Compiler/InstructionSetSupport.cs
@@ -10,6 +10,12 @@ using Internal.JitInterface;
namespace ILCompiler
{
+ [Flags]
+ public enum InstructionSetSupportFlags
+ {
+ Vector512Throttling = 0x1,
+ }
+
public class InstructionSetSupport
{
private readonly TargetArchitecture _targetArchitecture;
@@ -17,19 +23,21 @@ namespace ILCompiler
private readonly InstructionSetFlags _supportedInstructionSets;
private readonly InstructionSetFlags _unsupportedInstructionSets;
private readonly InstructionSetFlags _nonSpecifiableInstructionSets;
+ private readonly InstructionSetSupportFlags _flags;
public InstructionSetSupport(InstructionSetFlags supportedInstructionSets, InstructionSetFlags unsupportedInstructionSets, TargetArchitecture architecture) :
this(supportedInstructionSets, unsupportedInstructionSets, supportedInstructionSets, default(InstructionSetFlags), architecture)
{
}
- public InstructionSetSupport(InstructionSetFlags supportedInstructionSets, InstructionSetFlags unsupportedInstructionSets, InstructionSetFlags optimisticInstructionSets, InstructionSetFlags nonSpecifiableInstructionSets, TargetArchitecture architecture)
+ public InstructionSetSupport(InstructionSetFlags supportedInstructionSets, InstructionSetFlags unsupportedInstructionSets, InstructionSetFlags optimisticInstructionSets, InstructionSetFlags nonSpecifiableInstructionSets, TargetArchitecture architecture, InstructionSetSupportFlags flags = 0)
{
_supportedInstructionSets = supportedInstructionSets;
_unsupportedInstructionSets = unsupportedInstructionSets;
_optimisticInstructionSets = optimisticInstructionSets;
_targetArchitecture = architecture;
_nonSpecifiableInstructionSets = nonSpecifiableInstructionSets;
+ _flags = flags;
}
public bool IsInstructionSetSupported(InstructionSet instructionSet)
@@ -54,6 +62,8 @@ namespace ILCompiler
public TargetArchitecture Architecture => _targetArchitecture;
+ public InstructionSetSupportFlags Flags => _flags;
+
public static string GetHardwareIntrinsicId(TargetArchitecture architecture, TypeDesc potentialTypeDesc)
{
if (!potentialTypeDesc.IsIntrinsic || !(potentialTypeDesc is MetadataType potentialType))
diff --git a/src/coreclr/tools/Common/InstructionSetHelpers.cs b/src/coreclr/tools/Common/InstructionSetHelpers.cs
index e3236467f931..718053f13d0c 100644
--- a/src/coreclr/tools/Common/InstructionSetHelpers.cs
+++ b/src/coreclr/tools/Common/InstructionSetHelpers.cs
@@ -4,6 +4,7 @@
using System.Collections.Generic;
using System.Diagnostics;
using System.Runtime.InteropServices;
+using System.Runtime.Intrinsics.X86;
using ILCompiler;
@@ -19,6 +20,7 @@ namespace System.CommandLine
string mustNotBeMessage, string invalidImplicationMessage, Logger logger, bool optimizingForSize = false)
{
InstructionSetSupportBuilder instructionSetSupportBuilder = new(targetArchitecture);
+ InstructionSetSupportFlags flags = 0;
// Ready to run images are built with certain instruction set baselines
if ((targetArchitecture == TargetArchitecture.X86) || (targetArchitecture == TargetArchitecture.X64))
@@ -63,6 +65,52 @@ namespace System.CommandLine
}
HardwareIntrinsicHelpers.AddRuntimeRequiredIsaFlagsToBuilder(instructionSetSupportBuilder, cpuFeatures);
+ if (targetArchitecture is TargetArchitecture.X64 or TargetArchitecture.X86)
+ {
+ // Some architectures can experience frequency throttling when executing
+ // 512-bit wide instructions. To account for this, we set the
+ // default preferred vector width to 256-bits in some scenarios.
+ (int Eax, int Ebx, int Ecx, int Edx) cpuidInfo = X86Base.CpuId(0, 0);
+ bool isGenuineIntel = (cpuidInfo.Ebx == 0x756E6547) && // Genu
+ (cpuidInfo.Edx == 0x49656E69) && // ineI
+ (cpuidInfo.Ecx == 0x6C65746E); // ntel
+ if (isGenuineIntel)
+ {
+ cpuidInfo = X86Base.CpuId(1, 0);
+ Debug.Assert((cpuidInfo.Edx & (1 << 15)) != 0); // CMOV
+ int model = (cpuidInfo.Eax >> 4) & 0xF;
+ int family = (cpuidInfo.Eax >> 8) & 0xF;
+ int extendedModel = (cpuidInfo.Eax >> 16) & 0xF;
+
+ if (family == 0x06)
+ {
+ if (extendedModel == 0x05)
+ {
+ if (model == 0x05)
+ {
+ // * Skylake (Server)
+ // * Cascade Lake
+ // * Cooper Lake
+
+ flags |= InstructionSetSupportFlags.Vector512Throttling;
+ }
+ }
+ else if (extendedModel == 0x06)
+ {
+ if (model == 0x06)
+ {
+ // * Cannon Lake
+
+ flags |= InstructionSetSupportFlags.Vector512Throttling;
+ }
+ }
+ }
+ }
+
+ if ((flags & InstructionSetSupportFlags.Vector512Throttling) != 0 && logger.IsVerbose)
+ logger.LogMessage("Vector512 is throttled");
+ }
+
if (logger.IsVerbose)
logger.LogMessage($"The 'native' instruction set expanded to {instructionSetSupportBuilder}");
}
@@ -198,7 +246,8 @@ namespace System.CommandLine
unsupportedInstructionSet,
optimisticInstructionSet,
InstructionSetSupportBuilder.GetNonSpecifiableInstructionSetsForArch(targetArchitecture),
- targetArchitecture);
+ targetArchitecture,
+ flags);
}
}
}
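
The throttling check reads CPUID leaf 0 for the vendor string (EBX/EDX/ECX spell "GenuineIntel" in little-endian chunks) and decodes the family/model fields from leaf 1's EAX. As a worked example under the bit layout above: Skylake Server parts report family 0x06, extended model 0x05, model 0x05, i.e. the combined 0x55 signature. A small sketch of the field extraction, assuming the leaf-1 EAX value is already in hand:

    static class CpuidDecodeDemo
    {
        // Leaf 1 EAX layout: model in bits 4-7, family in bits 8-11,
        // extended model in bits 16-19.
        static (int Family, int ExtModel, int Model) Decode(int eax)
            => ((eax >> 8) & 0xF, (eax >> 16) & 0xF, (eax >> 4) & 0xF);

        static bool IsVector512Throttled(int eax)
        {
            var (family, extModel, model) = Decode(eax);
            return family == 0x06
                && ((extModel == 0x05 && model == 0x05)   // SKX / CLX / CPX
                 || (extModel == 0x06 && model == 0x06)); // Cannon Lake
        }
        // Decode(0x00050655) == (0x06, 0x05, 0x05) -> throttled
    }
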
diff --git a/src/coreclr/tools/Common/Internal/Runtime/ModuleHeaders.cs b/src/coreclr/tools/Common/Internal/Runtime/ModuleHeaders.cs
index 01071442f962..6fc5d9542e16 100644
--- a/src/coreclr/tools/Common/Internal/Runtime/ModuleHeaders.cs
+++ b/src/coreclr/tools/Common/Internal/Runtime/ModuleHeaders.cs
@@ -16,7 +16,7 @@ namespace Internal.Runtime
public const uint Signature = 0x00525452; // 'RTR'
public const ushort CurrentMajorVersion = 9;
- public const ushort CurrentMinorVersion = 1;
+ public const ushort CurrentMinorVersion = 2;
}
#if READYTORUN
#pragma warning disable 0169
diff --git a/src/coreclr/tools/Common/Internal/Runtime/ReadyToRunConstants.cs b/src/coreclr/tools/Common/Internal/Runtime/ReadyToRunConstants.cs
index 63383b7ddfa6..a37945534865 100644
--- a/src/coreclr/tools/Common/Internal/Runtime/ReadyToRunConstants.cs
+++ b/src/coreclr/tools/Common/Internal/Runtime/ReadyToRunConstants.cs
@@ -239,7 +239,9 @@ namespace Internal.ReadyToRunConstants
Stelem_Ref = 0x38,
Ldelema_Ref = 0x39,
- MemSet = 0x40,
+ MemZero = 0x3E,
+ MemSet = 0x3F,
+ NativeMemSet = 0x40,
MemCpy = 0x41,
// P/Invoke support
diff --git a/src/coreclr/tools/Common/JitInterface/CorInfoHelpFunc.cs b/src/coreclr/tools/Common/JitInterface/CorInfoHelpFunc.cs
index 2a9dbe302dac..5346806c1aff 100644
--- a/src/coreclr/tools/Common/JitInterface/CorInfoHelpFunc.cs
+++ b/src/coreclr/tools/Common/JitInterface/CorInfoHelpFunc.cs
@@ -214,7 +214,10 @@ namespace Internal.JitInterface
CORINFO_HELP_INIT_PINVOKE_FRAME, // initialize an inlined PInvoke Frame for the JIT-compiler
CORINFO_HELP_MEMSET, // Init block of memory
+ CORINFO_HELP_MEMZERO, // Init block of memory with zeroes
CORINFO_HELP_MEMCPY, // Copy block of memory
+ CORINFO_HELP_NATIVE_MEMSET, // Init block of memory using native memset (not safe for pDst being null,
+ // not safe for unbounded size, does not trigger GC)
CORINFO_HELP_RUNTIMEHANDLE_METHOD, // determine a type/field/method handle at run-time
CORINFO_HELP_RUNTIMEHANDLE_METHOD_LOG, // determine a type/field/method handle at run-time, with IBC logging
diff --git a/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs b/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs
index 4d291954fe42..5d10b4159fec 100644
--- a/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs
+++ b/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs
@@ -4118,6 +4118,9 @@ namespace Internal.JitInterface
case TargetArchitecture.X86:
Debug.Assert(InstructionSet.X86_SSE2 == InstructionSet.X64_SSE2);
Debug.Assert(_compilation.InstructionSetSupport.IsInstructionSetSupported(InstructionSet.X86_SSE2));
+
+ if ((_compilation.InstructionSetSupport.Flags & InstructionSetSupportFlags.Vector512Throttling) != 0)
+ flags.Set(CorJitFlag.CORJIT_FLAG_VECTOR512_THROTTLING);
break;
case TargetArchitecture.ARM64:
diff --git a/src/coreclr/tools/Common/JitInterface/JitConfigProvider.cs b/src/coreclr/tools/Common/JitInterface/JitConfigProvider.cs
index eb7b71e870e2..1d9ef515d4e4 100644
--- a/src/coreclr/tools/Common/JitInterface/JitConfigProvider.cs
+++ b/src/coreclr/tools/Common/JitInterface/JitConfigProvider.cs
@@ -130,7 +130,6 @@ namespace Internal.JitInterface
private static string GetTargetSpec(TargetDetails target)
{
- string targetOSComponent = (target.OperatingSystem == TargetOS.Windows ? "win" : "unix");
string targetArchComponent = target.Architecture switch
{
TargetArchitecture.X86 => "x86",
@@ -142,10 +141,21 @@ namespace Internal.JitInterface
_ => throw new NotImplementedException(target.Architecture.ToString())
};
- if ((target.Architecture == TargetArchitecture.ARM64) || (target.Architecture == TargetArchitecture.ARM))
+ string targetOSComponent;
+ if (target.Architecture is TargetArchitecture.ARM64 or TargetArchitecture.ARM)
{
targetOSComponent = "universal";
}
+#if !READYTORUN
+ else if (target.OperatingSystem == TargetOS.Windows && target.Architecture == TargetArchitecture.X86)
+ {
+ targetOSComponent = "win_aot";
+ }
+#endif
+ else
+ {
+ targetOSComponent = target.OperatingSystem == TargetOS.Windows ? "win" : "unix";
+ }
return targetOSComponent + '_' + targetArchComponent + "_" + RuntimeInformation.ProcessArchitecture.ToString().ToLowerInvariant();
}
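
GetTargetSpec now has three tiers: ARM targets keep the "universal" JIT, NativeAOT compiles targeting Windows x86 get a dedicated "win_aot" flavor (the branch is compiled out for ReadyToRun via the #if), and everything else keeps the plain win/unix prefix, with target and host architectures appended. A condensed mirror of the selection (standalone strings instead of TargetDetails; the nativeAot flag stands in for the #if !READYTORUN condition):

    static class TargetSpecDemo
    {
        static string GetTargetSpec(string os, string arch, string hostArch, bool nativeAot)
        {
            string osComponent =
                arch is "arm" or "arm64" ? "universal" :
                nativeAot && os == "windows" && arch == "x86" ? "win_aot" :
                os == "windows" ? "win" : "unix";
            return $"{osComponent}_{arch}_{hostArch}";
        }
        // GetTargetSpec("windows", "x86", "x64", nativeAot: true) == "win_aot_x86_x64",
        // which would select a JIT named along the lines of clrjit_win_aot_x86_x64.
    }
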
diff --git a/src/coreclr/tools/Common/JitInterface/LoongArch64PassStructInRegister.cs b/src/coreclr/tools/Common/JitInterface/LoongArch64PassStructInRegister.cs
index 8592386dbb7c..b0c193d09500 100644
--- a/src/coreclr/tools/Common/JitInterface/LoongArch64PassStructInRegister.cs
+++ b/src/coreclr/tools/Common/JitInterface/LoongArch64PassStructInRegister.cs
@@ -55,11 +55,7 @@ namespace Internal.JitInterface
int fieldIndex = 0;
foreach (FieldDesc field in typeDesc.GetFields())
{
- if (fieldIndex > 1)
- {
- return (uint)StructFloatFieldInfoFlags.STRUCT_NO_FLOAT_FIELD;
- }
- else if (field.IsStatic)
+ if (field.IsStatic)
{
continue;
}
@@ -162,6 +158,11 @@ namespace Internal.JitInterface
default:
{
+ if ((numIntroducedFields == 2) && (field.FieldType.Category == TypeFlags.Class))
+ {
+ return (uint)StructFloatFieldInfoFlags.STRUCT_NO_FLOAT_FIELD;
+ }
+
if (field.FieldType.GetElementSize().AsInt == 8)
{
if (numIntroducedFields > 1)
diff --git a/src/coreclr/tools/Common/TypeSystem/Common/ArrayType.cs b/src/coreclr/tools/Common/TypeSystem/Common/ArrayType.cs
index 4bd5f2b49b32..768f5f7eaa4f 100644
--- a/src/coreclr/tools/Common/TypeSystem/Common/ArrayType.cs
+++ b/src/coreclr/tools/Common/TypeSystem/Common/ArrayType.cs
@@ -260,9 +260,18 @@ namespace Internal.TypeSystem
case ArrayMethodKind.AddressWithHiddenArg:
{
var parameters = new TypeDesc[_owningType.Rank + 1];
- parameters[0] = Context.GetPointerType(Context.GetWellKnownType(WellKnownType.Void));
- for (int i = 0; i < _owningType.Rank; i++)
- parameters[i + 1] = _owningType.Context.GetWellKnownType(WellKnownType.Int32);
+ if (Context.Target.Architecture == TargetArchitecture.X86)
+ {
+ for (int i = 0; i < _owningType.Rank; i++)
+ parameters[i] = _owningType.Context.GetWellKnownType(WellKnownType.Int32);
+ parameters[_owningType.Rank] = Context.GetPointerType(Context.GetWellKnownType(WellKnownType.Void));
+ }
+ else
+ {
+ parameters[0] = Context.GetPointerType(Context.GetWellKnownType(WellKnownType.Void));
+ for (int i = 0; i < _owningType.Rank; i++)
+ parameters[i + 1] = _owningType.Context.GetWellKnownType(WellKnownType.Int32);
+ }
_signature = new MethodSignature(0, 0, _owningType.ElementType.MakeByRefType(), parameters, MethodSignature.EmbeddedSignatureMismatchPermittedFlag);
}
break;
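
On x86 the hidden void* type argument moves from the first parameter slot to the last, matching the order in which the callee expects its stack arguments; ArrayMethodILEmitter below makes the corresponding change, loading the hidden argument from slot 1 + rank instead of slot 1. An illustrative layout of the parameter array built above, shown for rank = 2 (strings stand in for TypeDesc):

    static class ArraySignatureDemo
    {
        // Mirrors the two branches above.
        static string[] BuildParameters(int rank, bool isX86)
        {
            var parameters = new string[rank + 1];
            if (isX86)
            {
                for (int i = 0; i < rank; i++)
                    parameters[i] = "int32";     // indices first
                parameters[rank] = "void*";      // hidden type arg last
            }
            else
            {
                parameters[0] = "void*";         // hidden type arg first
                for (int i = 0; i < rank; i++)
                    parameters[i + 1] = "int32";
            }
            return parameters;
        }
        // rank 2: non-x86 -> [void*, int32, int32]; x86 -> [int32, int32, void*]
    }
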
diff --git a/src/coreclr/tools/Common/TypeSystem/IL/Stubs/ArrayMethodILEmitter.cs b/src/coreclr/tools/Common/TypeSystem/IL/Stubs/ArrayMethodILEmitter.cs
index b90abeeb79ab..69aec3abb713 100644
--- a/src/coreclr/tools/Common/TypeSystem/IL/Stubs/ArrayMethodILEmitter.cs
+++ b/src/coreclr/tools/Common/TypeSystem/IL/Stubs/ArrayMethodILEmitter.cs
@@ -81,7 +81,9 @@ namespace Internal.IL.Stubs
int pointerSize = context.Target.PointerSize;
- int argStartOffset = _method.Kind == ArrayMethodKind.AddressWithHiddenArg ? 2 : 1;
+ bool isX86 = context.Target.Architecture == TargetArchitecture.X86;
+ int argStartOffset = !isX86 && _method.Kind == ArrayMethodKind.AddressWithHiddenArg ? 2 : 1;
+ int hiddenArg = !isX86 ? 1 : 1 + _rank;
var rangeExceptionLabel = _emitter.NewCodeLabel();
ILCodeLabel typeMismatchExceptionLabel = null;
@@ -112,7 +114,7 @@ namespace Internal.IL.Stubs
// As per ECMA-335 III.2.3, the prefix suppresses the type check.
// if (hiddenArg == IntPtr.Zero)
// goto TypeCheckPassed;
- codeStream.EmitLdArg(1);
+ codeStream.EmitLdArg(hiddenArg);
codeStream.Emit(ILOpcode.brfalse, typeCheckPassedLabel);
// MethodTable* actualElementType = this.m_pEEType->RelatedParameterType; // ArrayElementType
@@ -122,7 +124,7 @@ namespace Internal.IL.Stubs
_emitter.NewToken(eetypeType.GetKnownMethod("get_RelatedParameterType", null)));
// MethodTable* expectedElementType = hiddenArg->RelatedParameterType; // ArrayElementType
- codeStream.EmitLdArg(1);
+ codeStream.EmitLdArg(hiddenArg);
codeStream.Emit(ILOpcode.call,
_emitter.NewToken(eetypeType.GetKnownMethod("get_RelatedParameterType", null)));
diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/CompilerTypeSystemContext.BoxedTypes.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/CompilerTypeSystemContext.BoxedTypes.cs
index 9236e10d2c35..85e9f484f44b 100644
--- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/CompilerTypeSystemContext.BoxedTypes.cs
+++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/CompilerTypeSystemContext.BoxedTypes.cs
@@ -442,21 +442,34 @@ namespace ILCompiler
ILEmitter emit = new ILEmitter();
ILCodeStream codeStream = emit.NewCodeStream();
+ bool isX86 = Context.Target.Architecture == TargetArchitecture.X86;
+
FieldDesc eeTypeField = Context.GetWellKnownType(WellKnownType.Object).GetKnownField("m_pEEType");
// Load ByRef to the field with the value of the boxed valuetype
codeStream.EmitLdArg(0);
codeStream.Emit(ILOpcode.ldflda, emit.NewToken(Context.SystemModule.GetKnownType("System.Runtime.CompilerServices", "RawData").GetField("Data")));
+ if (isX86)
+ {
+ for (int i = 0; i < _targetMethod.Signature.Length; i++)
+ {
+ codeStream.EmitLdArg(i + 1);
+ }
+ }
+
// Load the MethodTable of the boxed valuetype (this is the hidden generic context parameter expected
// by the (canonical) instance method, but normally not part of the signature in IL).
codeStream.EmitLdArg(0);
codeStream.Emit(ILOpcode.ldfld, emit.NewToken(eeTypeField));
// Load rest of the arguments
- for (int i = 0; i < _targetMethod.Signature.Length; i++)
+ if (!isX86)
{
- codeStream.EmitLdArg(i + 1);
+ for (int i = 0; i < _targetMethod.Signature.Length; i++)
+ {
+ codeStream.EmitLdArg(i + 1);
+ }
}
// Call an instance method on the target valuetype that has a fake instantiation parameter
@@ -608,9 +621,18 @@ namespace ILCompiler
// Shared instance methods on generic valuetypes have a hidden parameter with the generic context.
// We add it to the signature so that we can refer to it from IL.
- parameters[0] = Context.GetWellKnownType(WellKnownType.Void).MakePointerType();
- for (int i = 0; i < _methodRepresented.Signature.Length; i++)
- parameters[i + 1] = _methodRepresented.Signature[i];
+ if (Context.Target.Architecture == TargetArchitecture.X86)
+ {
+ for (int i = 0; i < _methodRepresented.Signature.Length; i++)
+ parameters[i] = _methodRepresented.Signature[i];
+ parameters[_methodRepresented.Signature.Length] = Context.GetWellKnownType(WellKnownType.Void).MakePointerType();
+ }
+ else
+ {
+ parameters[0] = Context.GetWellKnownType(WellKnownType.Void).MakePointerType();
+ for (int i = 0; i < _methodRepresented.Signature.Length; i++)
+ parameters[i + 1] = _methodRepresented.Signature[i];
+ }
_signature = new MethodSignature(_methodRepresented.Signature.Flags,
_methodRepresented.Signature.GenericParameterCount,
diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/CompilerTypeSystemContext.InterfaceThunks.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/CompilerTypeSystemContext.InterfaceThunks.cs
index 53c34efc0ef7..92a5be3fed38 100644
--- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/CompilerTypeSystemContext.InterfaceThunks.cs
+++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/CompilerTypeSystemContext.InterfaceThunks.cs
@@ -216,9 +216,19 @@ namespace ILCompiler
MethodDesc getOrdinalInterfaceMethod = Context.GetHelperEntryPoint("SharedCodeHelpers", "GetOrdinalInterface");
MethodDesc getCurrentContext = Context.GetHelperEntryPoint("SharedCodeHelpers", "GetCurrentSharedThunkContext");
+ bool isX86 = Context.Target.Architecture == TargetArchitecture.X86;
+
// Load "this"
codeStream.EmitLdArg(0);
+ if (isX86)
+ {
+ for (int i = 0; i < _targetMethod.Signature.Length; i++)
+ {
+ codeStream.EmitLdArg(i + 1);
+ }
+ }
+
// Load the instantiating argument.
if (_interfaceIndex == UseContextFromRuntime)
{
@@ -232,10 +242,13 @@ namespace ILCompiler
codeStream.Emit(ILOpcode.call, emit.NewToken(getOrdinalInterfaceMethod));
}
- // Load rest of the arguments
- for (int i = 0; i < _targetMethod.Signature.Length; i++)
+ if (!isX86)
{
- codeStream.EmitLdArg(i + 1);
+ // Load rest of the arguments
+ for (int i = 0; i < _targetMethod.Signature.Length; i++)
+ {
+ codeStream.EmitLdArg(i + 1);
+ }
}
// Call an instance method on the target interface that has a fake instantiation parameter
@@ -292,9 +305,18 @@ namespace ILCompiler
// Shared instance methods on generic interfaces have a hidden parameter with the generic context.
// We add it to the signature so that we can refer to it from IL.
- parameters[0] = Context.GetWellKnownType(WellKnownType.IntPtr);
- for (int i = 0; i < _methodRepresented.Signature.Length; i++)
- parameters[i + 1] = _methodRepresented.Signature[i];
+ if (Context.Target.Architecture == TargetArchitecture.X86)
+ {
+ for (int i = 0; i < _methodRepresented.Signature.Length; i++)
+ parameters[i] = _methodRepresented.Signature[i];
+ parameters[_methodRepresented.Signature.Length] = Context.GetWellKnownType(WellKnownType.Void).MakePointerType();
+ }
+ else
+ {
+ parameters[0] = Context.GetWellKnownType(WellKnownType.IntPtr);
+ for (int i = 0; i < _methodRepresented.Signature.Length; i++)
+ parameters[i + 1] = _methodRepresented.Signature[i];
+ }
_signature = new MethodSignature(_methodRepresented.Signature.Flags,
_methodRepresented.Signature.GenericParameterCount,
diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/ArrayOfFrozenObjectsNode.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/ArrayOfFrozenObjectsNode.cs
index bb77597f53f0..485ff593583b 100644
--- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/ArrayOfFrozenObjectsNode.cs
+++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/ArrayOfFrozenObjectsNode.cs
@@ -2,6 +2,7 @@
// The .NET Foundation licenses this file to you under the MIT license.
using System;
+using System.Diagnostics;
using Internal.Text;
using Internal.TypeSystem;
@@ -33,6 +34,7 @@ namespace ILCompiler.DependencyAnalysis
builder.AddSymbol(this);
foreach (FrozenObjectNode node in factory.MetadataManager.GetFrozenObjects())
{
+ Debug.Assert(node is not FrozenObjectNode frozenObj || !frozenObj.ObjectType.RequiresAlign8());
AlignNextObject(ref builder, factory);
node.InitializeOffsetFromBeginningOfArray(builder.CountBytes);
diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/GenericLookupResult.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/GenericLookupResult.cs
index 0cf2a6bb44e4..efbc64f88799 100644
--- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/GenericLookupResult.cs
+++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/GenericLookupResult.cs
@@ -11,42 +11,6 @@ using ILCompiler.DependencyAnalysisFramework;
namespace ILCompiler.DependencyAnalysis
{
- // Represents a generic lookup within a canonical method body.
- // TODO: unify with NativeFormat.FixupSignatureKind
- public enum LookupResultType
- {
- Invalid,
- MethodTable, // a type
- UnwrapNullable, // a type (The type T described by a type spec that is generic over Nullable<T>)
- NonGcStatic, // the non-gc statics of a type
- GcStatic, // the gc statics of a type
- Method, // a method
- InterfaceDispatchCell, // the dispatch cell for calling an interface method
- MethodDictionary, // a dictionary for calling a generic method
- UnboxingStub, // the unboxing stub for a method
- ArrayType, // an array of type
- DefaultCtor, // default ctor of a type
- AllocObject, // the allocator of a type
- GvmVtableOffset, // vtable offset of a generic virtual method
- ProfileCounter, // profiling counter cell
- MethodLdToken, // a ldtoken result for a method
- FieldLdToken, // a ldtoken result for a field
- Field, // a field descriptor
- IsInst, // isinst helper
- CastClass, // castclass helper
- AllocArray, // the array allocator of a type
- TypeSize, // size of the type
- FieldOffset, // field offset
- CallingConvention_NoInstParam, // CallingConventionConverterThunk NO_INSTANTIATING_PARAM
- CallingConvention_HasInstParam, // CallingConventionConverterThunk HAS_INSTANTIATING_PARAM
- CallingConvention_MaybeInstParam, // CallingConventionConverterThunk MAYBE_INSTANTIATING_PARAM
- VtableOffset, // Offset of a virtual method into the type's vtable
- Constrained, // ConstrainedCallDesc
- ConstrainedDirect, // Direct ConstrainedCallDesc
- Integer, // Integer
- UnboxingMethod, // UnboxingMethod
- }
-
public struct GenericLookupResultContext
{
private readonly TypeSystemEntity _canonicalOwner;
diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/Target_X86/X86ReadyToRunGenericHelperNode.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/Target_X86/X86ReadyToRunGenericHelperNode.cs
index 079a38cc7460..e61f9c5b088c 100644
--- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/Target_X86/X86ReadyToRunGenericHelperNode.cs
+++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/Target_X86/X86ReadyToRunGenericHelperNode.cs
@@ -1,15 +1,254 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
+using System;
+
using ILCompiler.DependencyAnalysis.X86;
+using Internal.TypeSystem;
+
+using Debug = System.Diagnostics.Debug;
+
namespace ILCompiler.DependencyAnalysis
{
public partial class ReadyToRunGenericHelperNode
{
+ protected void EmitDictionaryLookup(NodeFactory factory, ref X86Emitter encoder, Register context, Register result, GenericLookupResult lookup, bool relocsOnly)
+ {
+ // INVARIANT: must not trash context register
+
+ // Find the generic dictionary slot
+ int dictionarySlot = 0;
+ if (!relocsOnly)
+ {
+ // The concrete slot won't be known until we're emitting data - don't ask for it in relocsOnly.
+ if (!factory.GenericDictionaryLayout(_dictionaryOwner).TryGetSlotForEntry(lookup, out dictionarySlot))
+ {
+ encoder.EmitZeroReg(result);
+ return;
+ }
+ }
+
+ // Load the generic dictionary cell
+ AddrMode loadEntry = new AddrMode(
+ context, null, dictionarySlot * factory.Target.PointerSize, 0, AddrModeSize.Int32);
+ encoder.EmitMOV(result, ref loadEntry);
+
+ // If there are any invalid entries, we need to test for them
+ //
+ // Skip this in relocsOnly to make it easier to weed out bugs - the _hasInvalidEntries
+ // flag can change over the course of compilation and the bad slot helper dependency
+ // should be reported by someone else - the system should not rely on it coming from here.
+ if (!relocsOnly && _hasInvalidEntries)
+ {
+ AddrMode resultAddr = new AddrMode(Register.RegDirect | result, null, 0, 0, AddrModeSize.Int32);
+ encoder.EmitCMP(ref resultAddr, 0);
+ encoder.EmitJE(GetBadSlotHelper(factory));
+ }
+ }
+
protected sealed override void EmitCode(NodeFactory factory, ref X86Emitter encoder, bool relocsOnly)
{
- encoder.EmitINT3();
+ // First load the generic context into the context register.
+ EmitLoadGenericContext(factory, ref encoder, relocsOnly);
+
+ switch (_id)
+ {
+ case ReadyToRunHelperId.GetNonGCStaticBase:
+ {
+ if (!TriggersLazyStaticConstructor(factory))
+ {
+ EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Result, _lookupSignature, relocsOnly);
+ encoder.EmitRET();
+ }
+ else
+ {
+ EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg0, _lookupSignature, relocsOnly);
+ encoder.EmitMOV(encoder.TargetRegister.Result, encoder.TargetRegister.Arg0);
+
+ // We need to trigger the cctor before returning the base. It is stored at the beginning of the non-GC statics region.
+ int cctorContextSize = NonGCStaticsNode.GetClassConstructorContextSize(factory.Target);
+ AddrMode initialized = new AddrMode(encoder.TargetRegister.Arg0, null, -cctorContextSize, 0, AddrModeSize.Int32);
+ encoder.EmitCMP(ref initialized, 0);
+ encoder.EmitRETIfEqual();
+
+ AddrMode loadCctor = new AddrMode(encoder.TargetRegister.Arg0, null, -cctorContextSize, 0, AddrModeSize.Int32);
+ encoder.EmitLEA(encoder.TargetRegister.Arg0, ref loadCctor);
+ encoder.EmitMOV(encoder.TargetRegister.Arg1, encoder.TargetRegister.Result);
+ encoder.EmitJMP(factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnNonGCStaticBase));
+ }
+ }
+ break;
+
+ case ReadyToRunHelperId.GetGCStaticBase:
+ {
+ MetadataType target = (MetadataType)_target;
+
+ EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Result, _lookupSignature, relocsOnly);
+
+ AddrMode loadFromResult = new AddrMode(encoder.TargetRegister.Result, null, 0, 0, AddrModeSize.Int32);
+ encoder.EmitMOV(encoder.TargetRegister.Result, ref loadFromResult);
+
+ if (!TriggersLazyStaticConstructor(factory))
+ {
+ encoder.EmitRET();
+ }
+ else
+ {
+ // We need to trigger the cctor before returning the base. It is stored at the beginning of the non-GC statics region.
+ GenericLookupResult nonGcRegionLookup = factory.GenericLookup.TypeNonGCStaticBase(target);
+ EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg0, nonGcRegionLookup, relocsOnly);
+
+ int cctorContextSize = NonGCStaticsNode.GetClassConstructorContextSize(factory.Target);
+ AddrMode initialized = new AddrMode(encoder.TargetRegister.Arg0, null, -cctorContextSize, 0, AddrModeSize.Int32);
+ encoder.EmitCMP(ref initialized, 0);
+ encoder.EmitRETIfEqual();
+
+ encoder.EmitMOV(encoder.TargetRegister.Arg1, encoder.TargetRegister.Result);
+ AddrMode loadCctor = new AddrMode(encoder.TargetRegister.Arg0, null, -cctorContextSize, 0, AddrModeSize.Int32);
+ encoder.EmitLEA(encoder.TargetRegister.Arg0, ref loadCctor);
+
+ encoder.EmitJMP(factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnGCStaticBase));
+ }
+ }
+ break;
+
+ case ReadyToRunHelperId.GetThreadStaticBase:
+ {
+ MetadataType target = (MetadataType)_target;
+
+ // Look up the index cell
+ EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg1, _lookupSignature, relocsOnly);
+
+ ISymbolNode helperEntrypoint;
+ if (TriggersLazyStaticConstructor(factory))
+ {
+ // There is a lazy class constructor. We need the non-GC static base because that's where the
+ // class constructor context lives.
+ GenericLookupResult nonGcRegionLookup = factory.GenericLookup.TypeNonGCStaticBase(target);
+ EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Result, nonGcRegionLookup, relocsOnly);
+ int cctorContextSize = NonGCStaticsNode.GetClassConstructorContextSize(factory.Target);
+ AddrMode loadCctor = new AddrMode(encoder.TargetRegister.Result, null, -cctorContextSize, 0, AddrModeSize.Int32);
+ encoder.EmitLEA(encoder.TargetRegister.Result, ref loadCctor);
+
+ AddrMode storeAtEspPlus4 = new AddrMode(Register.ESP, null, 4, 0, AddrModeSize.Int32);
+ encoder.EmitStackDup();
+ encoder.EmitMOV(ref storeAtEspPlus4, encoder.TargetRegister.Result);
+
+ helperEntrypoint = factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnThreadStaticBase);
+ }
+ else
+ {
+ helperEntrypoint = factory.HelperEntrypoint(HelperEntrypoint.GetThreadStaticBaseForType);
+ }
+
+ // First arg: address of the TypeManager slot that provides the helper with
+ // information about module index and the type manager instance (which is used
+ // for initialization on first access).
+ AddrMode loadFromArg1 = new AddrMode(encoder.TargetRegister.Arg1, null, 0, 0, AddrModeSize.Int32);
+ encoder.EmitMOV(encoder.TargetRegister.Arg0, ref loadFromArg1);
+
+ // Second arg: index of the type in the ThreadStatic section of the modules
+ AddrMode loadFromArg1AndDelta = new AddrMode(encoder.TargetRegister.Arg1, null, factory.Target.PointerSize, 0, AddrModeSize.Int32);
+ encoder.EmitMOV(encoder.TargetRegister.Arg1, ref loadFromArg1AndDelta);
+
+ encoder.EmitJMP(helperEntrypoint);
+ }
+ break;
+
+ case ReadyToRunHelperId.DelegateCtor:
+ {
+ // This is a weird helper. Codegen populated Arg0 and Arg1 with the values that the constructor
+ // method expects. Codegen also passed us the generic context on stack.
+ // We now need to load the delegate target method on the stack (using a dictionary lookup)
+ // and the optional 4th parameter, and call the ctor.
+
+ var target = (DelegateCreationInfo)_target;
+
+ // EmitLoadGenericContext loaded the context from stack into Result
+ EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Result, encoder.TargetRegister.Result, _lookupSignature, relocsOnly);
+
+ AddrMode storeAtEspPlus4 = new AddrMode(Register.ESP, null, 4, 0, AddrModeSize.Int32);
+ if (target.Thunk != null)
+ {
+ Debug.Assert(target.Constructor.Method.Signature.Length == 3);
+ AddrMode storeAtEspPlus8 = new AddrMode(Register.ESP, null, 8, 0, AddrModeSize.Int32);
+ encoder.EmitStackDup();
+ encoder.EmitMOV(ref storeAtEspPlus8, encoder.TargetRegister.Result);
+ encoder.EmitMOV(encoder.TargetRegister.Result, target.Thunk);
+ encoder.EmitMOV(ref storeAtEspPlus4, encoder.TargetRegister.Result);
+ }
+ else
+ {
+ Debug.Assert(target.Constructor.Method.Signature.Length == 2);
+ encoder.EmitMOV(ref storeAtEspPlus4, encoder.TargetRegister.Result);
+ }
+
+ encoder.EmitJMP(target.Constructor);
+ }
+ break;
+
+ // These are all simple: just get the thing from the dictionary and we're done
+ case ReadyToRunHelperId.TypeHandle:
+ case ReadyToRunHelperId.MethodHandle:
+ case ReadyToRunHelperId.FieldHandle:
+ case ReadyToRunHelperId.MethodDictionary:
+ case ReadyToRunHelperId.MethodEntry:
+ case ReadyToRunHelperId.VirtualDispatchCell:
+ case ReadyToRunHelperId.DefaultConstructor:
+ case ReadyToRunHelperId.ObjectAllocator:
+ case ReadyToRunHelperId.TypeHandleForCasting:
+ case ReadyToRunHelperId.ConstrainedDirectCall:
+ {
+ EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Result, _lookupSignature, relocsOnly);
+ encoder.EmitRET();
+ }
+ break;
+ default:
+ throw new NotImplementedException();
+ }
+ }
+
+ protected virtual void EmitLoadGenericContext(NodeFactory factory, ref X86Emitter encoder, bool relocsOnly)
+ {
+ // Assume generic context is already loaded in the context register.
+ if (Id == ReadyToRunHelperId.DelegateCtor)
+ {
+ AddrMode loadAtEspPlus4 = new AddrMode(Register.ESP, null, 4, 0, AddrModeSize.Int32);
+ encoder.EmitMOV(encoder.TargetRegister.Result, ref loadAtEspPlus4);
+ }
+ }
+ }
+
+ public partial class ReadyToRunGenericLookupFromTypeNode
+ {
+ protected override void EmitLoadGenericContext(NodeFactory factory, ref X86Emitter encoder, bool relocsOnly)
+ {
+ // We start with context register pointing to the MethodTable
+ Register contextRegister = encoder.TargetRegister.Arg0;
+
+ // Locate the VTable slot that points to the dictionary
+ int vtableSlot = 0;
+ if (!relocsOnly)
+ {
+ // The concrete slot won't be known until we're emitting data - don't ask for it in relocsOnly.
+ vtableSlot = VirtualMethodSlotHelper.GetGenericDictionarySlot(factory, (TypeDesc)_dictionaryOwner);
+ }
+
+ int pointerSize = factory.Target.PointerSize;
+ int slotOffset = EETypeNode.GetVTableOffset(pointerSize) + (vtableSlot * pointerSize);
+
+ // DelegateCtor is special, the context is on stack
+ if (Id == ReadyToRunHelperId.DelegateCtor)
+ {
+ AddrMode loadAtEspPlus4 = new AddrMode(Register.ESP, null, 4, 0, AddrModeSize.Int32);
+ encoder.EmitMOV(encoder.TargetRegister.Result, ref loadAtEspPlus4);
+ contextRegister = encoder.TargetRegister.Result;
+ }
+
+ // Load the dictionary pointer from the VTable
+ AddrMode loadDictionary = new AddrMode(contextRegister, null, slotOffset, 0, AddrModeSize.Int32);
+ encoder.EmitMOV(contextRegister, ref loadDictionary);
}
}
}
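
The lazy-cctor paths above (GetGCStaticBase and GetThreadStaticBase) share one pattern: compare the flag stored in the class constructor context that sits immediately before the non-GC statics base, return the base if the flag is zero, and otherwise tail-call the ensure-cctor helper. A minimal C# sketch of that control flow, with illustrative names throughout (this is not the compiler's or runtime's actual code):

using System;

internal static unsafe class CctorCheckSketch
{
    // Models the emitted stub: hand back the static base once the class
    // constructor has run (the flag in the cctor context reads zero),
    // otherwise tail-call the ensure-cctor helper with the context address
    // and the base. All names here are illustrative assumptions.
    internal static IntPtr GetStaticBase(
        int* cctorContext,                       // Arg0 after the LEA
        IntPtr staticBase,                       // the base in the Result register
        delegate*<int*, IntPtr, IntPtr> ensure)  // EnsureClassConstructorRunAndReturn*StaticBase
    {
        if (*cctorContext == 0)                  // EmitCMP(ref initialized, 0); EmitRETIfEqual
            return staticBase;
        return ensure(cctorContext, staticBase); // EmitJMP, i.e. a tail call
    }
}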
diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/Target_X86/X86ReadyToRunHelperNode.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/Target_X86/X86ReadyToRunHelperNode.cs
index 255ec2f0b016..c6f6408ad676 100644
--- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/Target_X86/X86ReadyToRunHelperNode.cs
+++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/Target_X86/X86ReadyToRunHelperNode.cs
@@ -2,6 +2,7 @@
// The .NET Foundation licenses this file to you under the MIT license.
using System;
+using System.Diagnostics;
using ILCompiler.DependencyAnalysis.X86;
@@ -20,7 +21,25 @@ namespace ILCompiler.DependencyAnalysis
{
case ReadyToRunHelperId.VirtualCall:
{
- encoder.EmitINT3();
+ MethodDesc targetMethod = (MethodDesc)Target;
+
+ Debug.Assert(!targetMethod.OwningType.IsInterface);
+ Debug.Assert(!targetMethod.CanMethodBeInSealedVTable(factory));
+
+ AddrMode loadFromThisPtr = new AddrMode(encoder.TargetRegister.Arg0, null, 0, 0, AddrModeSize.Int32);
+ encoder.EmitMOV(encoder.TargetRegister.Result, ref loadFromThisPtr);
+
+ int pointerSize = factory.Target.PointerSize;
+
+ int slot = 0;
+ if (!relocsOnly)
+ {
+ slot = VirtualMethodSlotHelper.GetVirtualMethodSlot(factory, targetMethod, targetMethod.OwningType);
+ Debug.Assert(slot != -1);
+ }
+
+ AddrMode jmpAddrMode = new AddrMode(encoder.TargetRegister.Result, null, EETypeNode.GetVTableOffset(pointerSize) + (slot * pointerSize), 0, AddrModeSize.Int32);
+ encoder.EmitJmpToAddrMode(ref jmpAddrMode);
}
break;
@@ -51,19 +70,119 @@ namespace ILCompiler.DependencyAnalysis
case ReadyToRunHelperId.GetThreadStaticBase:
{
- encoder.EmitINT3();
+ MetadataType target = (MetadataType)Target;
+ ISortableSymbolNode index = factory.TypeThreadStaticIndex(target);
+ if (index is TypeThreadStaticIndexNode ti && ti.IsInlined)
+ {
+ throw new NotImplementedException();
+ }
+ else
+ {
+ encoder.EmitMOV(encoder.TargetRegister.Result, index);
+
+ // First arg: address of the TypeManager slot that provides the helper with
+ // information about the module index and the type manager instance (which is used
+ // for initialization on first access).
+ AddrMode loadFromEax = new AddrMode(encoder.TargetRegister.Result, null, 0, 0, AddrModeSize.Int32);
+ encoder.EmitMOV(encoder.TargetRegister.Arg0, ref loadFromEax);
+
+ // Second arg: index of the type in the ThreadStatic section of the module
+ AddrMode loadFromEaxAndDelta = new AddrMode(encoder.TargetRegister.Result, null, factory.Target.PointerSize, 0, AddrModeSize.Int32);
+ encoder.EmitMOV(encoder.TargetRegister.Arg1, ref loadFromEaxAndDelta);
+
+ ISymbolNode helper = factory.HelperEntrypoint(HelperEntrypoint.GetThreadStaticBaseForType);
+ if (!factory.PreinitializationManager.HasLazyStaticConstructor(target))
+ {
+ encoder.EmitJMP(helper);
+ }
+ else
+ {
+ encoder.EmitMOV(encoder.TargetRegister.Result, factory.TypeNonGCStaticsSymbol(target), -NonGCStaticsNode.GetClassConstructorContextSize(factory.Target));
+
+ AddrMode initialized = new AddrMode(encoder.TargetRegister.Result, null, 0, 0, AddrModeSize.Int32);
+ encoder.EmitCMP(ref initialized, 0);
+ encoder.EmitJE(helper);
+
+ // Add extra parameter and tail call
+ encoder.EmitStackDup();
+ AddrMode storeAtEspPlus4 = new AddrMode(Register.ESP, null, 4, 0, AddrModeSize.Int32);
+ encoder.EmitMOV(ref storeAtEspPlus4, encoder.TargetRegister.Result);
+
+ encoder.EmitJMP(factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnThreadStaticBase));
+ }
+ }
}
break;
case ReadyToRunHelperId.GetGCStaticBase:
{
- encoder.EmitINT3();
+ MetadataType target = (MetadataType)Target;
+ bool hasLazyStaticConstructor = factory.PreinitializationManager.HasLazyStaticConstructor(target);
+ encoder.EmitMOV(encoder.TargetRegister.Result, factory.TypeGCStaticsSymbol(target));
+ AddrMode loadFromEax = new AddrMode(encoder.TargetRegister.Result, null, 0, 0, AddrModeSize.Int32);
+ encoder.EmitMOV(encoder.TargetRegister.Result, ref loadFromEax);
+
+ if (!hasLazyStaticConstructor)
+ {
+ encoder.EmitRET();
+ }
+ else
+ {
+ // We need to trigger the cctor before returning the base. The cctor context is stored at the beginning of the non-GC statics region.
+ encoder.EmitMOV(encoder.TargetRegister.Arg0, factory.TypeNonGCStaticsSymbol(target), -NonGCStaticsNode.GetClassConstructorContextSize(factory.Target));
+
+ AddrMode initialized = new AddrMode(encoder.TargetRegister.Arg0, null, 0, 0, AddrModeSize.Int32);
+ encoder.EmitCMP(ref initialized, 0);
+ encoder.EmitRETIfEqual();
+
+ encoder.EmitMOV(encoder.TargetRegister.Arg1, encoder.TargetRegister.Result);
+ encoder.EmitJMP(factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnGCStaticBase));
+ }
}
break;
case ReadyToRunHelperId.DelegateCtor:
{
- encoder.EmitINT3();
+ DelegateCreationInfo target = (DelegateCreationInfo)Target;
+
+ encoder.EmitStackDup();
+
+ if (target.TargetNeedsVTableLookup)
+ {
+ Debug.Assert(!target.TargetMethod.CanMethodBeInSealedVTable(factory));
+
+ AddrMode loadFromThisPtr = new AddrMode(encoder.TargetRegister.Arg1, null, 0, 0, AddrModeSize.Int32);
+ encoder.EmitMOV(encoder.TargetRegister.Result, ref loadFromThisPtr);
+
+ int slot = 0;
+ if (!relocsOnly)
+ slot = VirtualMethodSlotHelper.GetVirtualMethodSlot(factory, target.TargetMethod, target.TargetMethod.OwningType);
+
+ Debug.Assert(slot != -1);
+ AddrMode loadFromSlot = new AddrMode(encoder.TargetRegister.Result, null, EETypeNode.GetVTableOffset(factory.Target.PointerSize) + (slot * factory.Target.PointerSize), 0, AddrModeSize.Int32);
+ encoder.EmitMOV(encoder.TargetRegister.Result, ref loadFromSlot);
+ }
+ else
+ {
+ encoder.EmitMOV(encoder.TargetRegister.Result, target.GetTargetNode(factory));
+ }
+
+ AddrMode storeAtEspPlus4 = new AddrMode(Register.ESP, null, 4, 0, AddrModeSize.Int32);
+ if (target.Thunk != null)
+ {
+ Debug.Assert(target.Constructor.Method.Signature.Length == 3);
+ AddrMode storeAtEspPlus8 = new AddrMode(Register.ESP, null, 8, 0, AddrModeSize.Int32);
+ encoder.EmitStackDup();
+ encoder.EmitMOV(ref storeAtEspPlus8, encoder.TargetRegister.Result);
+ encoder.EmitMOV(ref storeAtEspPlus4, target.Thunk);
+ }
+ else
+ {
+ Debug.Assert(target.Constructor.Method.Signature.Length == 2);
+ encoder.EmitMOV(ref storeAtEspPlus4, encoder.TargetRegister.Result);
+ }
+
+ encoder.EmitJMP(target.Constructor);
}
break;
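
Both the generic-lookup stub earlier and the non-generic stub above read the same two pointer-sized fields out of the thread-static index cell before jumping to GetThreadStaticBaseForType. A hedged C# mirror of that layout; the field names are assumptions, only the offsets follow from the MOVs above:

using System;
using System.Runtime.InteropServices;

// Hypothetical mirror of the index cell the stubs dereference: offset 0 is
// loaded into Arg0 and offset PointerSize into Arg1 before the jump.
[StructLayout(LayoutKind.Sequential)]
internal struct ThreadStaticIndexCellSketch
{
    internal IntPtr TypeManagerSlot; // first helper arg: address of the TypeManager slot
    internal IntPtr TypeIndex;       // second helper arg: index in the module's ThreadStatic region
}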
diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/JitHelper.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/JitHelper.cs
index 8bcf658230b0..cf1d04ca666a 100644
--- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/JitHelper.cs
+++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/JitHelper.cs
@@ -139,10 +139,16 @@ namespace ILCompiler
break;
case ReadyToRunHelper.MemCpy:
- mangledName = "memcpy"; // TODO: Null reference handling
+ mangledName = "RhSpanHelpers_MemCopy";
break;
case ReadyToRunHelper.MemSet:
- mangledName = "memset"; // TODO: Null reference handling
+ mangledName = "RhSpanHelpers_MemSet";
+ break;
+ case ReadyToRunHelper.MemZero:
+ mangledName = "RhSpanHelpers_MemZero";
+ break;
+ case ReadyToRunHelper.NativeMemSet:
+ mangledName = "memset";
break;
case ReadyToRunHelper.GetRuntimeTypeHandle:
@@ -341,13 +347,5 @@ namespace ILCompiler
return "RhpNewFast";
}
-
- public static string GetNewArrayHelperForType(TypeDesc type)
- {
- if (type.RequiresAlign8())
- return "RhpNewArrayAlign8";
-
- return "RhpNewArray";
- }
}
}
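
Net effect of the JitHelper.cs hunk: the managed memory helpers now bind to RhSpanHelpers exports, replacing the raw CRT calls that carried the "// TODO: Null reference handling" notes, while the new NativeMemSet keeps plain memset. A condensed restatement of the mapping using an illustrative enum (not the compiler's actual types):

using System;

internal enum MemHelper { MemSet, MemZero, MemCpy, NativeMemSet }

internal static class HelperNameSketch
{
    internal static string MangledNameFor(MemHelper id) => id switch
    {
        MemHelper.MemSet       => "RhSpanHelpers_MemSet",   // managed implementation
        MemHelper.MemZero      => "RhSpanHelpers_MemZero",  // managed implementation
        MemHelper.MemCpy       => "RhSpanHelpers_MemCopy",  // managed implementation
        MemHelper.NativeMemSet => "memset",                 // raw CRT entry point
        _ => throw new ArgumentOutOfRangeException(nameof(id)),
    };
}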
diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/ObjectWriter/CoffObjectWriter.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/ObjectWriter/CoffObjectWriter.cs
index 3198fafaf5d2..6211ca306c3e 100644
--- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/ObjectWriter/CoffObjectWriter.cs
+++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/ObjectWriter/CoffObjectWriter.cs
@@ -446,8 +446,8 @@ namespace ILCompiler.ObjectWriter
// Emit RUNTIME_FUNCTION
pdataSectionWriter.EmitAlignment(4);
pdataSectionWriter.EmitSymbolReference(IMAGE_REL_BASED_ADDR32NB, currentSymbolName, start);
- // Only x64 has the End symbol
- if (_machine == Machine.Amd64)
+ // Only x86/x64 have the End symbol
+ if (_machine is Machine.I386 or Machine.Amd64)
{
pdataSectionWriter.EmitSymbolReference(IMAGE_REL_BASED_ADDR32NB, currentSymbolName, end);
}
diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/SubstitutedILProvider.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/SubstitutedILProvider.cs
index 975df4243c21..09c54b96c5e8 100644
--- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/SubstitutedILProvider.cs
+++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/SubstitutedILProvider.cs
@@ -879,12 +879,16 @@ namespace ILCompiler
return false;
TypeDesc type2 = ReadLdToken(ref reader, methodIL, flags);
- if (type1 == null)
+ if (type2 == null)
return false;
if (!ReadGetTypeFromHandle(ref reader, methodIL, flags))
return false;
+ // No value in making this work for definitions
+ if (type1.IsGenericDefinition || type2.IsGenericDefinition)
+ return false;
+
// Dataflow runs on top of uninstantiated IL and we can't answer some questions there.
// Unfortunately this means dataflow will still see code that the rest of the system
// might have optimized away. It should not be a problem in practice.
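
The recognized IL shape corresponds to source that compares two typeof results. The first hunk fixes a copy-paste bug (the null check for the second ldtoken tested type1), and the second hunk bails out when either token is an open generic definition. Illustrative C# shapes of the two cases:

using System;
using System.Collections.Generic;

internal static class TypeEqualitySketch
{
    // Recognized and foldable: both ldtoken results are concrete types.
    internal static bool Closed() => typeof(List<int>) == typeof(List<int>);

    // Now explicitly skipped: a token is an open generic definition.
    internal static bool OpenDefinition() => typeof(List<>) == typeof(Dictionary<,>);
}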
diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/TypePreinit.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/TypePreinit.cs
index e2e7a8b756e5..9d594b41bbc9 100644
--- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/TypePreinit.cs
+++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/TypePreinit.cs
@@ -247,6 +247,11 @@ namespace ILCompiler
return Status.Fail(methodIL.OwningMethod, opcode, "Array out of bounds");
}
+ if (elementType.RequiresAlign8())
+ {
+ return Status.Fail(methodIL.OwningMethod, opcode, "Align8");
+ }
+
AllocationSite allocSite = new AllocationSite(_type, instructionCounter);
stack.Push(new ArrayInstance(elementType.MakeArrayType(), elementCount, allocSite));
}
@@ -537,6 +542,11 @@ namespace ILCompiler
return Status.Fail(methodIL.OwningMethod, opcode, "Needs dataflow analysis");
}
+ if (owningType.RequiresAlign8())
+ {
+ return Status.Fail(methodIL.OwningMethod, opcode, "Align8");
+ }
+
Value[] ctorParameters = new Value[ctorSig.Length + 1];
for (int i = ctorSig.Length - 1; i >= 0; i--)
{
@@ -1623,6 +1633,9 @@ namespace ILCompiler
if (type.IsNullable)
return Status.Fail(methodIL.OwningMethod, opcode);
+ if (type.RequiresAlign8())
+ return Status.Fail(methodIL.OwningMethod, opcode, "Align8");
+
Value value = stack.PopIntoLocation(type);
AllocationSite allocSite = new AllocationSite(_type, instructionCounter);
if (!ObjectInstance.TryBox((DefType)type, value, allocSite, out ObjectInstance boxedResult))
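
With these checks, the preinit interpreter refuses newarr, newobj, and box of types that require 8-byte alignment. That matters on 32-bit targets (for example arm32, where double and long members force align8, an assumption this sketch relies on); such cctors now fall back to runtime initialization:

// Illustrative shapes only; whether a type RequiresAlign8 depends on the target.
internal class Align8Holder
{
    internal double Value; // makes the type align8 on e.g. arm32
}

internal static class PreinitExample
{
    internal static readonly double[] s_values = new double[4];       // newarr of an align8 element type
    internal static readonly Align8Holder s_obj = new Align8Holder(); // newobj of an align8 type
}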
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/JitInterface/CorInfoImpl.ReadyToRun.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/JitInterface/CorInfoImpl.ReadyToRun.cs
index f7527c96dd9d..ad83b1eb42a5 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/JitInterface/CorInfoImpl.ReadyToRun.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/JitInterface/CorInfoImpl.ReadyToRun.cs
@@ -1028,9 +1028,15 @@ namespace Internal.JitInterface
case CorInfoHelpFunc.CORINFO_HELP_MEMSET:
id = ReadyToRunHelper.MemSet;
break;
+ case CorInfoHelpFunc.CORINFO_HELP_MEMZERO:
+ id = ReadyToRunHelper.MemZero;
+ break;
case CorInfoHelpFunc.CORINFO_HELP_MEMCPY:
id = ReadyToRunHelper.MemCpy;
break;
+ case CorInfoHelpFunc.CORINFO_HELP_NATIVE_MEMSET:
+ id = ReadyToRunHelper.NativeMemSet;
+ break;
case CorInfoHelpFunc.CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD:
id = ReadyToRunHelper.GetRuntimeMethodHandle;
diff --git a/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/ReadyToRunSignature.cs b/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/ReadyToRunSignature.cs
index 8d325f467d60..0eae2f10cb8f 100644
--- a/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/ReadyToRunSignature.cs
+++ b/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/ReadyToRunSignature.cs
@@ -1685,10 +1685,18 @@ namespace ILCompiler.Reflection.ReadyToRun
builder.Append("MEM_SET");
break;
+ case ReadyToRunHelper.MemZero:
+ builder.Append("MEM_ZERO");
+ break;
+
case ReadyToRunHelper.MemCpy:
builder.Append("MEM_CPY");
break;
+ case ReadyToRunHelper.NativeMemSet:
+ builder.Append("NATIVE_MEM_SET");
+ break;
+
// PInvoke helpers
case ReadyToRunHelper.PInvokeBegin:
builder.Append("PINVOKE_BEGIN");
diff --git a/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs b/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs
index 4495e0322d4a..8755580e3f29 100644
--- a/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs
+++ b/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs
@@ -542,9 +542,15 @@ namespace Internal.JitInterface
case CorInfoHelpFunc.CORINFO_HELP_MEMSET:
id = ReadyToRunHelper.MemSet;
break;
+ case CorInfoHelpFunc.CORINFO_HELP_MEMZERO:
+ id = ReadyToRunHelper.MemZero;
+ break;
case CorInfoHelpFunc.CORINFO_HELP_MEMCPY:
id = ReadyToRunHelper.MemCpy;
break;
+ case CorInfoHelpFunc.CORINFO_HELP_NATIVE_MEMSET:
+ id = ReadyToRunHelper.NativeMemSet;
+ break;
case CorInfoHelpFunc.CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE:
id = ReadyToRunHelper.GetRuntimeType;
diff --git a/src/coreclr/tools/aot/ILCompiler/repro/repro.csproj b/src/coreclr/tools/aot/ILCompiler/repro/repro.csproj
index 09e2a5bec4c8..f7aa4cc8b93f 100644
--- a/src/coreclr/tools/aot/ILCompiler/repro/repro.csproj
+++ b/src/coreclr/tools/aot/ILCompiler/repro/repro.csproj
@@ -6,7 +6,9 @@
<PlatformTarget>AnyCPU</PlatformTarget>
<AppendTargetFrameworkToOutputPath>false</AppendTargetFrameworkToOutputPath>
<AppendRuntimeIdentifierToOutputPath>false</AppendRuntimeIdentifierToOutputPath>
- <RuntimeIdentifiers>linux-x64;win-x64;osx-x64;freebsd-x64;freebsd-arm64</RuntimeIdentifiers>
+ <RuntimeIdentifiers>linux-x64;win-x64;osx-x64</RuntimeIdentifiers>
+ <!-- FreeBSD runtime/apphost packs aren't built in the official build, so only reference the RIDs when targeting FreeBSD -->
+ <RuntimeIdentifiers Condition="'$(TargetOS)' == 'freebsd'">$(RuntimeIdentifiers);freebsd-x64;freebsd-arm64</RuntimeIdentifiers>
<Configurations>Debug;Release;Checked</Configurations>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<RunAnalyzers>false</RunAnalyzers>
diff --git a/src/coreclr/tools/superpmi/superpmi-shared/compileresult.cpp b/src/coreclr/tools/superpmi/superpmi-shared/compileresult.cpp
index ec6e88684060..74040dc5aa3b 100644
--- a/src/coreclr/tools/superpmi/superpmi-shared/compileresult.cpp
+++ b/src/coreclr/tools/superpmi/superpmi-shared/compileresult.cpp
@@ -691,6 +691,18 @@ const char* relocationTypeToString(uint16_t fRelocType)
// From corinfo.h
case IMAGE_REL_BASED_REL32:
return "rel32";
+ case IMAGE_REL_SECREL:
+ return "secrel";
+ case IMAGE_REL_TLSGD:
+ return "tlsgd";
+ case IMAGE_REL_AARCH64_TLSDESC_ADR_PAGE21:
+ return "tlsdesc_high21";
+ case IMAGE_REL_AARCH64_TLSDESC_LD64_LO12:
+ return "tlsdesc_lo12";
+ case IMAGE_REL_AARCH64_TLSDESC_ADD_LO12:
+ return "tlsdesc_add_lo12";
+ case IMAGE_REL_AARCH64_TLSDESC_CALL:
+ return "tlsdesc_call";
case IMAGE_REL_BASED_THUMB_BRANCH24:
return "thumb_branch24";
default:
@@ -851,6 +863,7 @@ void CompileResult::applyRelocs(RelocContext* rc, unsigned char* block1, ULONG b
break;
case IMAGE_REL_ARM64_PAGEBASE_REL21: // ADRP 21 bit PC-relative page address
+ case IMAGE_REL_AARCH64_TLSDESC_ADR_PAGE21: // ADRP 21 bit for TLSDesc
{
if ((section_begin <= address) && (address < section_end)) // A reloc for our section?
{
@@ -875,6 +888,16 @@ void CompileResult::applyRelocs(RelocContext* rc, unsigned char* block1, ULONG b
}
break;
+ case IMAGE_REL_AARCH64_TLSDESC_LD64_LO12:
+ case IMAGE_REL_AARCH64_TLSDESC_ADD_LO12: // TLSDESC ADD for corresponding ADRP
+ case IMAGE_REL_AARCH64_TLSDESC_CALL:
+ {
+ // These are patched later by the linker during actual execution
+ // and do not need relocation.
+ wasRelocHandled = true;
+ }
+ break;
+
default:
break;
}
@@ -902,13 +925,19 @@ void CompileResult::applyRelocs(RelocContext* rc, unsigned char* block1, ULONG b
wasRelocHandled = true;
}
+ else if (relocType == IMAGE_REL_TLSGD)
+ {
+ // These are patched later by the linker during actual execution
+ // and do not need relocation.
+ wasRelocHandled = true;
+ }
}
if (wasRelocHandled)
continue;
// Now do all-platform relocations.
- if (tmp.fRelocType == IMAGE_REL_BASED_REL32)
+ if ((tmp.fRelocType == IMAGE_REL_BASED_REL32) || (tmp.fRelocType == IMAGE_REL_SECREL))
{
DWORDLONG fixupLocation = tmp.location;
diff --git a/src/coreclr/vm/CMakeLists.txt b/src/coreclr/vm/CMakeLists.txt
index eb00b7c6a675..ccd8bc35c8bf 100644
--- a/src/coreclr/vm/CMakeLists.txt
+++ b/src/coreclr/vm/CMakeLists.txt
@@ -636,7 +636,6 @@ if(CLR_CMAKE_TARGET_ARCH_AMD64)
${ARCH_SOURCES_DIR}/AsmHelpers.asm
${ARCH_SOURCES_DIR}/CallDescrWorkerAMD64.asm
${ARCH_SOURCES_DIR}/ComCallPreStub.asm
- ${ARCH_SOURCES_DIR}/CrtHelpers.asm
${ARCH_SOURCES_DIR}/GenericComCallStubs.asm
${ARCH_SOURCES_DIR}/GenericComPlusCallStubs.asm
${ARCH_SOURCES_DIR}/getstate.asm
@@ -676,7 +675,6 @@ elseif(CLR_CMAKE_TARGET_ARCH_ARM64)
set(VM_SOURCES_WKS_ARCH_ASM
${ARCH_SOURCES_DIR}/AsmHelpers.asm
${ARCH_SOURCES_DIR}/CallDescrWorkerARM64.asm
- ${ARCH_SOURCES_DIR}/CrtHelpers.asm
${ARCH_SOURCES_DIR}/patchedcode.asm
${ARCH_SOURCES_DIR}/PInvokeStubs.asm
${ARCH_SOURCES_DIR}/thunktemplates.asm
@@ -693,7 +691,6 @@ else(CLR_CMAKE_TARGET_WIN32)
set(VM_SOURCES_WKS_ARCH_ASM
${ARCH_SOURCES_DIR}/asmhelpers.S
${ARCH_SOURCES_DIR}/calldescrworkeramd64.S
- ${ARCH_SOURCES_DIR}/crthelpers.S
${ARCH_SOURCES_DIR}/externalmethodfixupthunk.S
${ARCH_SOURCES_DIR}/getstate.S
${ARCH_SOURCES_DIR}/jithelpers_fast.S
@@ -723,7 +720,6 @@ else(CLR_CMAKE_TARGET_WIN32)
elseif(CLR_CMAKE_TARGET_ARCH_ARM)
set(VM_SOURCES_WKS_ARCH_ASM
${ARCH_SOURCES_DIR}/asmhelpers.S
- ${ARCH_SOURCES_DIR}/crthelpers.S
${ARCH_SOURCES_DIR}/ehhelpers.S
${ARCH_SOURCES_DIR}/patchedcode.S
${ARCH_SOURCES_DIR}/pinvokestubs.S
@@ -733,7 +729,6 @@ else(CLR_CMAKE_TARGET_WIN32)
set(VM_SOURCES_WKS_ARCH_ASM
${ARCH_SOURCES_DIR}/asmhelpers.S
${ARCH_SOURCES_DIR}/calldescrworkerarm64.S
- ${ARCH_SOURCES_DIR}/crthelpers.S
${ARCH_SOURCES_DIR}/patchedcode.S
${ARCH_SOURCES_DIR}/pinvokestubs.S
${ARCH_SOURCES_DIR}/thunktemplates.S
@@ -742,7 +737,6 @@ else(CLR_CMAKE_TARGET_WIN32)
set(VM_SOURCES_WKS_ARCH_ASM
${ARCH_SOURCES_DIR}/asmhelpers.S
${ARCH_SOURCES_DIR}/calldescrworkerloongarch64.S
- ${ARCH_SOURCES_DIR}/crthelpers.S
${ARCH_SOURCES_DIR}/pinvokestubs.S
${ARCH_SOURCES_DIR}/thunktemplates.S
)
@@ -750,7 +744,6 @@ else(CLR_CMAKE_TARGET_WIN32)
set(VM_SOURCES_WKS_ARCH_ASM
${ARCH_SOURCES_DIR}/asmhelpers.S
${ARCH_SOURCES_DIR}/calldescrworkerriscv64.S
- ${ARCH_SOURCES_DIR}/crthelpers.S
${ARCH_SOURCES_DIR}/pinvokestubs.S
${ARCH_SOURCES_DIR}/thunktemplates.S
)
diff --git a/src/coreclr/vm/amd64/CrtHelpers.asm b/src/coreclr/vm/amd64/CrtHelpers.asm
deleted file mode 100644
index 09f48fa5879b..000000000000
--- a/src/coreclr/vm/amd64/CrtHelpers.asm
+++ /dev/null
@@ -1,79 +0,0 @@
-; Licensed to the .NET Foundation under one or more agreements.
-; The .NET Foundation licenses this file to you under the MIT license.
-
-include AsmMacros.inc
-
-extern memset:proc
-extern memmove:proc
-
-; JIT_MemSet/JIT_MemCpy
-;
-; It is IMPORTANT that the exception handling code is able to find these guys
-; on the stack, but on windows platforms we can just defer to the platform
-; implementation.
-;
-
-; void JIT_MemSet(void* dest, int c, size_t count)
-;
-; Purpose:
-; Sets the first "count" bytes of the block of memory pointed byte
-; "dest" to the specified value (interpreted as an unsigned char).
-;
-; Entry:
-; RCX: void* dest - Pointer to the block of memory to fill.
-; RDX: int c - Value to be set.
-; R8: size_t count - Number of bytes to be set to the value.
-;
-; Exit:
-;
-; Uses:
-;
-; Exceptions:
-;
-LEAF_ENTRY JIT_MemSet, _TEXT
- test r8, r8 ; check if count is zero
- jz Exit_MemSet ; if zero, no bytes to set
-
- cmp byte ptr [rcx], 0 ; check dest for null
-
- jmp memset ; forward to the CRT implementation
-
-Exit_MemSet:
- ret
-
-LEAF_END_MARKED JIT_MemSet, _TEXT
-
-; void JIT_MemCpy(void* dest, const void* src, size_t count)
-;
-; Purpose:
-; Copies the values of "count" bytes from the location pointed to
-; by "src" to the memory block pointed by "dest".
-;
-; Entry:
-; RCX: void* dest - Pointer to the destination array where content is to be copied.
-; RDX: const void* src - Pointer to the source of the data to be copied.
-; R8: size_t count - Number of bytes to copy.
-;
-; Exit:
-;
-; Uses:
-;
-; Exceptions:
-;
-LEAF_ENTRY JIT_MemCpy, _TEXT
- test r8, r8 ; check if count is zero
- jz Exit_MemCpy ; if zero, no bytes to copy
-
- cmp byte ptr [rcx], 0 ; check dest for null
- cmp byte ptr [rdx], 0 ; check src for null
-
- ; Use memmove to handle overlapping buffers for better
- ; compatibility with .NET Framework. Needing to handle
- ; overlapping buffers in cpblk is undefined by the spec.
- jmp memmove ; forward to the CRT implementation
-
-Exit_MemCpy:
- ret
-
-LEAF_END_MARKED JIT_MemCpy, _TEXT
- end
diff --git a/src/coreclr/vm/amd64/cgenamd64.cpp b/src/coreclr/vm/amd64/cgenamd64.cpp
index 261ecec5c46d..f774d71a3b42 100644
--- a/src/coreclr/vm/amd64/cgenamd64.cpp
+++ b/src/coreclr/vm/amd64/cgenamd64.cpp
@@ -206,7 +206,7 @@ void HelperMethodFrame::UpdateRegDisplay(const PREGDISPLAY pRD, bool updateFloat
#endif // TARGET_UNIX
-#define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContextPointers->regname = m_MachState.m_Ptrs.p##regname;
+#define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContextPointers->regname = (DWORD64 *)(TADDR *)m_MachState.m_Ptrs.p##regname;
ENUM_CALLEE_SAVED_REGISTERS();
#undef CALLEE_SAVED_REGISTER
diff --git a/src/coreclr/vm/amd64/crthelpers.S b/src/coreclr/vm/amd64/crthelpers.S
deleted file mode 100644
index 82219e574092..000000000000
--- a/src/coreclr/vm/amd64/crthelpers.S
+++ /dev/null
@@ -1,74 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-.intel_syntax noprefix
-#include "unixasmmacros.inc"
-#include "asmconstants.h"
-
-// JIT_MemSet/JIT_MemCpy
-//
-// It is IMPORTANT that the exception handling code is able to find these guys
-// on the stack, but on non-windows platforms we can just defer to the platform
-// implementation.
-//
-
-// void JIT_MemSet(void* dest, int c, size_t count)
-//
-// Purpose:
-// Sets the first "count" bytes of the block of memory pointed byte
-// "dest" to the specified value (interpreted as an unsigned char).
-//
-// Entry:
-// RDI: void* dest - Pointer to the block of memory to fill.
-// RSI: int c - Value to be set.
-// RDX: size_t count - Number of bytes to be set to the value.
-//
-// Exit:
-//
-// Uses:
-//
-// Exceptions:
-//
-LEAF_ENTRY JIT_MemSet, _TEXT
- test rdx, rdx // check if count is zero
- jz Exit_MemSet // if zero, no bytes to set
-
- cmp byte ptr [rdi], 0 // check dest for null
-
- jmp C_PLTFUNC(memset) // forward to the CRT implementation
-
-Exit_MemSet:
- ret
-
-LEAF_END_MARKED JIT_MemSet, _TEXT
-
-// void JIT_MemCpy(void* dest, const void* src, size_t count)
-//
-// Purpose:
-// Copies the values of "count" bytes from the location pointed to
-// by "src" to the memory block pointed by "dest".
-//
-// Entry:
-// RDI: void* dest - Pointer to the destination array where content is to be copied.
-// RSI: const void* src - Pointer to the source of the data to be copied.
-// RDX: size_t count - Number of bytes to copy.
-//
-// Exit:
-//
-// Uses:
-//
-// Exceptions:
-//
-LEAF_ENTRY JIT_MemCpy, _TEXT
- test rdx, rdx // check if count is zero
- jz Exit_MemCpy // if zero, no bytes to set
-
- cmp byte ptr [rdi], 0 // check dest for null
- cmp byte ptr [rsi], 0 // check src for null
-
- jmp C_PLTFUNC(memcpy) // forward to the CRT implementation
-
-Exit_MemCpy:
- ret
-
-LEAF_END_MARKED JIT_MemCpy, _TEXT
diff --git a/src/coreclr/vm/amd64/gmsamd64.cpp b/src/coreclr/vm/amd64/gmsamd64.cpp
index bc1079c14ace..41c7b0c9afa2 100644
--- a/src/coreclr/vm/amd64/gmsamd64.cpp
+++ b/src/coreclr/vm/amd64/gmsamd64.cpp
@@ -136,7 +136,7 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState,
#else // !DACCESS_COMPILE
-#define CALLEE_SAVED_REGISTER(regname) unwoundState->m_Ptrs.p##regname = PTR_ULONG64(nonVolRegPtrs.regname);
+#define CALLEE_SAVED_REGISTER(regname) unwoundState->m_Ptrs.p##regname = PTR_TADDR(nonVolRegPtrs.regname);
ENUM_CALLEE_SAVED_REGISTERS();
#undef CALLEE_SAVED_REGISTER
diff --git a/src/coreclr/vm/appdomain.cpp b/src/coreclr/vm/appdomain.cpp
index feafd1f8abad..bb5d3d17e005 100644
--- a/src/coreclr/vm/appdomain.cpp
+++ b/src/coreclr/vm/appdomain.cpp
@@ -1352,7 +1352,7 @@ void SystemDomain::LoadBaseSystemClasses()
// further loading of nonprimitive types may need casting support.
// initialize cast cache here.
CastCache::Initialize();
- ECall::PopulateManagedCastHelpers();
+ ECall::PopulateManagedHelpers();
// used by IsImplicitInterfaceOfSZArray
CoreLibBinder::GetClass(CLASS__IENUMERABLEGENERIC);
diff --git a/src/coreclr/vm/arm/crthelpers.S b/src/coreclr/vm/arm/crthelpers.S
deleted file mode 100644
index db0ed192c4d6..000000000000
--- a/src/coreclr/vm/arm/crthelpers.S
+++ /dev/null
@@ -1,51 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-// ***********************************************************************
-// File: crthelpers.S
-//
-// ***********************************************************************
-
-#include "unixasmmacros.inc"
-#include "asmconstants.h"
-
-.syntax unified
-.thumb
-
-// JIT_MemSet/JIT_MemCpy
-//
-// It is IMPORANT that the exception handling code is able to find these guys
-// on the stack, but to keep them from being tailcalled by VC++ we need to turn
-// off optimization and it ends up being a wasteful implementation.
-//
-// Hence these assembly helpers.
-//
-//EXTERN_C void __stdcall JIT_MemSet(void* _dest, int c, size_t count)
-LEAF_ENTRY JIT_MemSet, _TEXT
-
- cmp r2, #0
- it eq
- bxeq lr
-
- ldrb r3, [r0]
-
- b C_PLTFUNC(memset)
-
-LEAF_END_MARKED JIT_MemSet, _TEXT
-
-
-//EXTERN_C void __stdcall JIT_MemCpy(void* _dest, const void *_src, size_t count)
-LEAF_ENTRY JIT_MemCpy, _TEXT
-//
-
- cmp r2, #0
- it eq
- bxeq lr
-
- ldrb r3, [r0]
- ldrb r3, [r1]
-
- b C_PLTFUNC(memcpy)
-
-LEAF_END_MARKED JIT_MemCpy, _TEXT
-
diff --git a/src/coreclr/vm/arm/stubs.cpp b/src/coreclr/vm/arm/stubs.cpp
index d72c32201700..1424dcecbd91 100644
--- a/src/coreclr/vm/arm/stubs.cpp
+++ b/src/coreclr/vm/arm/stubs.cpp
@@ -1619,7 +1619,7 @@ void InlinedCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD, bool updateFloats
// Update the frame pointer in the current context.
pRD->pCurrentContext->R11 = m_pCalleeSavedFP;
- pRD->pCurrentContextPointers->R11 = &m_pCalleeSavedFP;
+ pRD->pCurrentContextPointers->R11 = (DWORD *)&m_pCalleeSavedFP;
// This is necessary to unwind methods with alloca. This needs to stay
// in sync with definition of REG_SAVED_LOCALLOC_SP in the JIT.
diff --git a/src/coreclr/vm/arm64/crthelpers.S b/src/coreclr/vm/arm64/crthelpers.S
deleted file mode 100644
index e123fc82808d..000000000000
--- a/src/coreclr/vm/arm64/crthelpers.S
+++ /dev/null
@@ -1,33 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-#include "unixasmmacros.inc"
-
-// JIT_MemSet/JIT_MemCpy
-//
-// It is IMPORTANT that the exception handling code is able to find these guys
-// on the stack, but on non-windows platforms we can just defer to the platform
-// implementation.
-//
-LEAF_ENTRY JIT_MemSet, _TEXT
- cbz x2, LOCAL_LABEL(JIT_MemSet_ret)
-
- ldrb wzr, [x0]
-
- b C_PLTFUNC(memset)
-
-LOCAL_LABEL(JIT_MemSet_ret):
- ret lr
-LEAF_END_MARKED JIT_MemSet, _TEXT
-
-LEAF_ENTRY JIT_MemCpy, _TEXT
- cbz x2, LOCAL_LABEL(JIT_MemCpy_ret)
-
- ldrb wzr, [x0]
- ldrb wzr, [x1]
-
- b C_PLTFUNC(memcpy)
-
-LOCAL_LABEL(JIT_MemCpy_ret):
- ret lr
-LEAF_END_MARKED JIT_MemCpy, _TEXT
diff --git a/src/coreclr/vm/arm64/crthelpers.asm b/src/coreclr/vm/arm64/crthelpers.asm
deleted file mode 100644
index d4d13351365c..000000000000
--- a/src/coreclr/vm/arm64/crthelpers.asm
+++ /dev/null
@@ -1,81 +0,0 @@
-; Licensed to the .NET Foundation under one or more agreements.
-; The .NET Foundation licenses this file to you under the MIT license.
-
-#include "ksarm64.h"
-#include "asmconstants.h"
-#include "asmmacros.h"
-
- IMPORT memset
- IMPORT memmove
-
-; JIT_MemSet/JIT_MemCpy
-;
-; It is IMPORTANT that the exception handling code is able to find these guys
-; on the stack, but on windows platforms we can just defer to the platform
-; implementation.
-;
-
-; void JIT_MemSet(void* dest, int c, size_t count)
-;
-; Purpose:
-; Sets the first "count" bytes of the block of memory pointed byte
-; "dest" to the specified value (interpreted as an unsigned char).
-;
-; Entry:
-; RCX: void* dest - Pointer to the block of memory to fill.
-; RDX: int c - Value to be set.
-; R8: size_t count - Number of bytes to be set to the value.
-;
-; Exit:
-;
-; Uses:
-;
-; Exceptions:
-;
-
- TEXTAREA
-
- LEAF_ENTRY JIT_MemSet
- cbz x2, JIT_MemSet_ret ; check if count is zero, no bytes to set
-
- ldrb wzr, [x0] ; check dest for null
-
- b memset ; forward to the CRT implementation
-
-JIT_MemSet_ret
- ret lr
-
- LEAF_END_MARKED JIT_MemSet
-
-; void JIT_MemCpy(void* dest, const void* src, size_t count)
-;
-; Purpose:
-; Copies the values of "count" bytes from the location pointed to
-; by "src" to the memory block pointed by "dest".
-;
-; Entry:
-; RCX: void* dest - Pointer to the destination array where content is to be copied.
-; RDX: const void* src - Pointer to the source of the data to be copied.
-; R8: size_t count - Number of bytes to copy.
-;
-; Exit:
-;
-; Uses:
-;
-; Exceptions:
-;
- LEAF_ENTRY JIT_MemCpy
- cbz x2, JIT_MemCpy_ret ; check if count is zero, no bytes to set
-
- ldrb wzr, [x0] ; check dest for null
- ldrb wzr, [x1] ; check src for null
-
- b memmove ; forward to the CRT implementation
-
-JIT_MemCpy_ret
- ret lr
-
- LEAF_END_MARKED JIT_MemCpy
-
-; Must be at very end of file
- END
diff --git a/src/coreclr/vm/arm64/stubs.cpp b/src/coreclr/vm/arm64/stubs.cpp
index 9f0c9ae14e71..03783f016a52 100644
--- a/src/coreclr/vm/arm64/stubs.cpp
+++ b/src/coreclr/vm/arm64/stubs.cpp
@@ -731,7 +731,7 @@ void InlinedCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD, bool updateFloats
// Update the frame pointer in the current context.
- pRD->pCurrentContextPointers->Fp = &m_pCalleeSavedFP;
+ pRD->pCurrentContextPointers->Fp = (DWORD64 *)&m_pCalleeSavedFP;
LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK InlinedCallFrame::UpdateRegDisplay(pc:%p, sp:%p)\n", pRD->ControlPC, pRD->SP));
diff --git a/src/coreclr/vm/ceeload.cpp b/src/coreclr/vm/ceeload.cpp
index e05302f53670..547db8d05971 100644
--- a/src/coreclr/vm/ceeload.cpp
+++ b/src/coreclr/vm/ceeload.cpp
@@ -3402,24 +3402,6 @@ MethodDesc *Module::FindMethod(mdToken pMethod)
RETURN pMDRet;
}
-//
-// GetPropertyInfoForMethodDef wraps the metadata function of the same name.
-//
-
-HRESULT Module::GetPropertyInfoForMethodDef(mdMethodDef md, mdProperty *ppd, LPCSTR *pName, ULONG *pSemantic)
-{
- CONTRACTL
- {
- INSTANCE_CHECK;
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- return GetMDImport()->GetPropertyInfoForMethodDef(md, ppd, pName, pSemantic);
-}
-
// Return true if this module has any live (jitted) JMC functions.
// If a module has no jitted JMC functions, then it's as if it's a
// non-user module.
@@ -4677,6 +4659,122 @@ PTR_VOID ReflectionModule::GetRvaField(RVA field) // virtual
// VASigCookies
// ===========================================================================
+static bool TypeSignatureContainsGenericVariables(SigParser& sp);
+static bool MethodSignatureContainsGenericVariables(SigParser& sp);
+
+static bool TypeSignatureContainsGenericVariables(SigParser& sp)
+{
+ STANDARD_VM_CONTRACT;
+
+ CorElementType et = ELEMENT_TYPE_END;
+ IfFailThrow(sp.GetElemType(&et));
+
+ if (CorIsPrimitiveType(et))
+ return false;
+
+ switch (et)
+ {
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_TYPEDBYREF:
+ return false;
+
+ case ELEMENT_TYPE_BYREF:
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_SZARRAY:
+ return TypeSignatureContainsGenericVariables(sp);
+
+ case ELEMENT_TYPE_VALUETYPE:
+ case ELEMENT_TYPE_CLASS:
+ IfFailThrow(sp.GetToken(NULL)); // Skip RID
+ return false;
+
+ case ELEMENT_TYPE_FNPTR:
+ return MethodSignatureContainsGenericVariables(sp);
+
+ case ELEMENT_TYPE_ARRAY:
+ {
+ if (TypeSignatureContainsGenericVariables(sp))
+ return true;
+
+ uint32_t rank;
+ IfFailThrow(sp.GetData(&rank)); // Get rank
+ if (rank)
+ {
+ uint32_t nsizes;
+ IfFailThrow(sp.GetData(&nsizes)); // Get # of sizes
+ while (nsizes--)
+ {
+ IfFailThrow(sp.GetData(NULL)); // Skip size
+ }
+
+ uint32_t nlbounds;
+ IfFailThrow(sp.GetData(&nlbounds)); // Get # of lower bounds
+ while (nlbounds--)
+ {
+ IfFailThrow(sp.GetData(NULL)); // Skip lower bounds
+ }
+ }
+ }
+ return false;
+
+ case ELEMENT_TYPE_GENERICINST:
+ {
+ if (TypeSignatureContainsGenericVariables(sp))
+ return true;
+
+ uint32_t argCnt;
+ IfFailThrow(sp.GetData(&argCnt)); // Get number of parameters
+ while (argCnt--)
+ {
+ if (TypeSignatureContainsGenericVariables(sp))
+ return true;
+ }
+ }
+ return false;
+
+ case ELEMENT_TYPE_INTERNAL:
+ IfFailThrow(sp.GetPointer(NULL));
+ return false;
+
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_MVAR:
+ return true;
+
+ default:
+ // Return a conservative answer for unhandled elements
+ _ASSERTE(!"Unexpected element type.");
+ return true;
+ }
+}
+
+static bool MethodSignatureContainsGenericVariables(SigParser& sp)
+{
+ STANDARD_VM_CONTRACT;
+
+ uint32_t callConv = 0;
+ IfFailThrow(sp.GetCallingConvInfo(&callConv));
+
+ if (callConv & IMAGE_CEE_CS_CALLCONV_GENERIC)
+ {
+ // Generic signatures should never show up here; return a conservative answer.
+ _ASSERTE(!"Unexpected generic signature.");
+ return true;
+ }
+
+ uint32_t numArgs = 0;
+ IfFailThrow(sp.GetData(&numArgs));
+
+ // Iterate over the return type and parameters
+ for (uint32_t i = 0; i <= numArgs; i++)
+ {
+ if (TypeSignatureContainsGenericVariables(sp))
+ return true;
+ }
+
+ return false;
+}
+
//==========================================================================
// Enregisters a VASig.
//==========================================================================
@@ -4685,15 +4783,39 @@ VASigCookie *Module::GetVASigCookie(Signature vaSignature, const SigTypeContext*
CONTRACT(VASigCookie*)
{
INSTANCE_CHECK;
- THROWS;
- GC_TRIGGERS;
- MODE_ANY;
+ STANDARD_VM_CHECK;
POSTCONDITION(CheckPointer(RETVAL));
INJECT_FAULT(COMPlusThrowOM());
}
CONTRACT_END;
- Module* pLoaderModule = ClassLoader::ComputeLoaderModuleWorker(this, mdTokenNil, typeContext->m_classInst, typeContext->m_methodInst);
+ SigTypeContext emptyContext;
+
+ Module* pLoaderModule = this;
+ if (!typeContext->IsEmpty())
+ {
+ // Strip the generic context if it is not actually used by the signature. It is necessary for both:
+ // - Performance: allow more sharing of vasig cookies
+ // - Functionality: built-in runtime marshalling is disallowed for generic signatures
+ SigParser sigParser = vaSignature.CreateSigParser();
+ if (MethodSignatureContainsGenericVariables(sigParser))
+ {
+ pLoaderModule = ClassLoader::ComputeLoaderModuleWorker(this, mdTokenNil, typeContext->m_classInst, typeContext->m_methodInst);
+ }
+ else
+ {
+ typeContext = &emptyContext;
+ }
+ }
+ else
+ {
+#ifdef _DEBUG
+ // The method signature should not contain any generic variables if the generic context is not provided.
+ SigParser sigParser = vaSignature.CreateSigParser();
+ _ASSERTE(!MethodSignatureContainsGenericVariables(sigParser));
+#endif
+ }
+
VASigCookie *pCookie = GetVASigCookieWorker(this, pLoaderModule, vaSignature, typeContext);
RETURN pCookie;
@@ -4703,9 +4825,7 @@ VASigCookie *Module::GetVASigCookieWorker(Module* pDefiningModule, Module* pLoad
{
CONTRACT(VASigCookie*)
{
- THROWS;
- GC_TRIGGERS;
- MODE_ANY;
+ STANDARD_VM_CHECK;
POSTCONDITION(CheckPointer(RETVAL));
INJECT_FAULT(COMPlusThrowOM());
}
diff --git a/src/coreclr/vm/ceeload.h b/src/coreclr/vm/ceeload.h
index 3ad7b37a76a3..1b487dd9fa87 100644
--- a/src/coreclr/vm/ceeload.h
+++ b/src/coreclr/vm/ceeload.h
@@ -1324,8 +1324,6 @@ public:
MethodDesc *FindMethodThrowing(mdToken pMethod);
MethodDesc *FindMethod(mdToken pMethod);
- HRESULT GetPropertyInfoForMethodDef(mdMethodDef md, mdProperty *ppd, LPCSTR *pName, ULONG *pSemantic);
-
public:
// Debugger stuff
diff --git a/src/coreclr/vm/ceeload.inl b/src/coreclr/vm/ceeload.inl
index ff446d4ec799..18d7557d6e85 100644
--- a/src/coreclr/vm/ceeload.inl
+++ b/src/coreclr/vm/ceeload.inl
@@ -64,7 +64,7 @@ inline
void LookupMap<SIZE_T>::SetValueAt(PTR_TADDR pValue, SIZE_T value, TADDR flags)
{
WRAPPER_NO_CONTRACT;
- VolatileStore(pValue, value | flags);
+ VolatileStore(pValue, dac_cast<TADDR>(value | flags));
}
#endif // DACCESS_COMPILE
diff --git a/src/coreclr/vm/clrtocomcall.cpp b/src/coreclr/vm/clrtocomcall.cpp
index c604a6c8a901..d47445f8f64f 100644
--- a/src/coreclr/vm/clrtocomcall.cpp
+++ b/src/coreclr/vm/clrtocomcall.cpp
@@ -505,7 +505,7 @@ UINT32 CLRToCOMLateBoundWorker(
ULONG uSemantic;
// See if there is property information for this member.
- hr = pItfMT->GetModule()->GetPropertyInfoForMethodDef(pItfMD->GetMemberDef(), &propToken, &strMemberName, &uSemantic);
+ hr = pItfMT->GetMDImport()->GetPropertyInfoForMethodDef(pItfMD->GetMemberDef(), &propToken, &strMemberName, &uSemantic);
if (hr != S_OK)
{
// Non-property method
diff --git a/src/coreclr/vm/commtmemberinfomap.cpp b/src/coreclr/vm/commtmemberinfomap.cpp
index 8bc185e9d81a..7b2626a24c76 100644
--- a/src/coreclr/vm/commtmemberinfomap.cpp
+++ b/src/coreclr/vm/commtmemberinfomap.cpp
@@ -689,7 +689,7 @@ void ComMTMemberInfoMap::GetMethodPropsForMeth(
rProps[ix].bFunction2Getter = FALSE;
// See if there is property information for this member.
- hr = pMeth->GetModule()->GetPropertyInfoForMethodDef(pMeth->GetMemberDef(), &pd, &pPropName, &uSemantic);
+ hr = pMeth->GetMDImport()->GetPropertyInfoForMethodDef(pMeth->GetMemberDef(), &pd, &pPropName, &uSemantic);
IfFailThrow(hr);
if (hr == S_OK)
diff --git a/src/coreclr/vm/comutilnative.cpp b/src/coreclr/vm/comutilnative.cpp
index 612cb9d72dc0..6c7e2468d274 100644
--- a/src/coreclr/vm/comutilnative.cpp
+++ b/src/coreclr/vm/comutilnative.cpp
@@ -1703,9 +1703,10 @@ enum ValueTypeHashCodeStrategy
DoubleField,
SingleField,
FastGetHashCode,
+ ValueTypeOverride,
};
-static ValueTypeHashCodeStrategy GetHashCodeStrategy(MethodTable* mt, QCall::ObjectHandleOnStack objHandle, UINT32* fieldOffset, UINT32* fieldSize)
+static ValueTypeHashCodeStrategy GetHashCodeStrategy(MethodTable* mt, QCall::ObjectHandleOnStack objHandle, UINT32* fieldOffset, UINT32* fieldSize, MethodTable** fieldMTOut)
{
CONTRACTL
{
@@ -1772,10 +1773,18 @@ static ValueTypeHashCodeStrategy GetHashCodeStrategy(MethodTable* mt, QCall::Obj
*fieldSize = field->LoadSize();
ret = ValueTypeHashCodeStrategy::FastGetHashCode;
}
+ else if (HasOverriddenMethod(fieldMT,
+ CoreLibBinder::GetClass(CLASS__VALUE_TYPE),
+ CoreLibBinder::GetMethod(METHOD__VALUE_TYPE__GET_HASH_CODE)->GetSlot()))
+ {
+ *fieldOffset += field->GetOffsetUnsafe();
+ *fieldMTOut = fieldMT;
+ ret = ValueTypeHashCodeStrategy::ValueTypeOverride;
+ }
else
{
*fieldOffset += field->GetOffsetUnsafe();
- ret = GetHashCodeStrategy(fieldMT, objHandle, fieldOffset, fieldSize);
+ ret = GetHashCodeStrategy(fieldMT, objHandle, fieldOffset, fieldSize, fieldMTOut);
}
}
}
@@ -1785,18 +1794,18 @@ static ValueTypeHashCodeStrategy GetHashCodeStrategy(MethodTable* mt, QCall::Obj
return ret;
}
-extern "C" INT32 QCALLTYPE ValueType_GetHashCodeStrategy(MethodTable* mt, QCall::ObjectHandleOnStack objHandle, UINT32* fieldOffset, UINT32* fieldSize)
+extern "C" INT32 QCALLTYPE ValueType_GetHashCodeStrategy(MethodTable* mt, QCall::ObjectHandleOnStack objHandle, UINT32* fieldOffset, UINT32* fieldSize, MethodTable** fieldMT)
{
QCALL_CONTRACT;
ValueTypeHashCodeStrategy ret = ValueTypeHashCodeStrategy::None;
*fieldOffset = 0;
*fieldSize = 0;
+ *fieldMT = NULL;
BEGIN_QCALL;
-
- ret = GetHashCodeStrategy(mt, objHandle, fieldOffset, fieldSize);
+ ret = GetHashCodeStrategy(mt, objHandle, fieldOffset, fieldSize, fieldMT);
END_QCALL;
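
The new ValueTypeOverride strategy surfaces the field's MethodTable so the managed caller can invoke that field type's own GetHashCode override instead of recursing into its fields. Illustrative shapes only:

// A single-field struct whose field type overrides GetHashCode now reports
// ValueTypeOverride (with the field's MethodTable) rather than recursing.
internal struct Key
{
    internal int A, B;
    public override int GetHashCode() => A * 31 + B;
}

internal struct KeyWrapper // hash strategy: ValueTypeOverride on Key
{
    internal Key Inner;    // assigned elsewhere in a real program
}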
diff --git a/src/coreclr/vm/comutilnative.h b/src/coreclr/vm/comutilnative.h
index a3c5ea65c3ca..0f305e0af900 100644
--- a/src/coreclr/vm/comutilnative.h
+++ b/src/coreclr/vm/comutilnative.h
@@ -252,7 +252,7 @@ public:
extern "C" BOOL QCALLTYPE MethodTable_AreTypesEquivalent(MethodTable* mta, MethodTable* mtb);
extern "C" BOOL QCALLTYPE MethodTable_CanCompareBitsOrUseFastGetHashCode(MethodTable* mt);
-extern "C" INT32 QCALLTYPE ValueType_GetHashCodeStrategy(MethodTable* mt, QCall::ObjectHandleOnStack objHandle, UINT32* fieldOffset, UINT32* fieldSize);
+extern "C" INT32 QCALLTYPE ValueType_GetHashCodeStrategy(MethodTable* mt, QCall::ObjectHandleOnStack objHandle, UINT32* fieldOffset, UINT32* fieldSize, MethodTable** fieldMT);
class StreamNative {
public:
diff --git a/src/coreclr/vm/comwaithandle.cpp b/src/coreclr/vm/comwaithandle.cpp
index 3af42e09ecdf..8ec141aa2a4e 100644
--- a/src/coreclr/vm/comwaithandle.cpp
+++ b/src/coreclr/vm/comwaithandle.cpp
@@ -16,7 +16,7 @@
#include "excep.h"
#include "comwaithandle.h"
-FCIMPL3(INT32, WaitHandleNative::CorWaitOneNative, HANDLE handle, INT32 timeout, CLR_BOOL useTrivialWaits)
+FCIMPL2(INT32, WaitHandleNative::CorWaitOneNative, HANDLE handle, INT32 timeout)
{
FCALL_CONTRACT;
@@ -28,8 +28,7 @@ FCIMPL3(INT32, WaitHandleNative::CorWaitOneNative, HANDLE handle, INT32 timeout,
Thread* pThread = GET_THREAD();
- WaitMode waitMode = (WaitMode)((!useTrivialWaits ? WaitMode_Alertable : WaitMode_None) | WaitMode_IgnoreSyncCtx);
- retVal = pThread->DoAppropriateWait(1, &handle, TRUE, timeout, waitMode);
+ retVal = pThread->DoAppropriateWait(1, &handle, TRUE, timeout, (WaitMode)(WaitMode_Alertable | WaitMode_IgnoreSyncCtx));
HELPER_METHOD_FRAME_END();
return retVal;
diff --git a/src/coreclr/vm/comwaithandle.h b/src/coreclr/vm/comwaithandle.h
index c892d7aae855..ae250a1b9a96 100644
--- a/src/coreclr/vm/comwaithandle.h
+++ b/src/coreclr/vm/comwaithandle.h
@@ -18,7 +18,7 @@
class WaitHandleNative
{
public:
- static FCDECL3(INT32, CorWaitOneNative, HANDLE handle, INT32 timeout, CLR_BOOL useTrivialWaits);
+ static FCDECL2(INT32, CorWaitOneNative, HANDLE handle, INT32 timeout);
static FCDECL4(INT32, CorWaitMultipleNative, HANDLE *handleArray, INT32 numHandles, CLR_BOOL waitForAll, INT32 timeout);
static FCDECL3(INT32, CorSignalAndWaitOneNative, HANDLE waitHandleSignalUNSAFE, HANDLE waitHandleWaitUNSAFE, INT32 timeout);
};
diff --git a/src/coreclr/vm/corelib.h b/src/coreclr/vm/corelib.h
index 8e68900686a7..c52c58954165 100644
--- a/src/coreclr/vm/corelib.h
+++ b/src/coreclr/vm/corelib.h
@@ -633,6 +633,11 @@ DEFINE_METHOD(RUNTIME_HELPERS, ALLOC_TAILCALL_ARG_BUFFER, AllocTailCallArgB
DEFINE_METHOD(RUNTIME_HELPERS, GET_TAILCALL_INFO, GetTailCallInfo, NoSig)
DEFINE_METHOD(RUNTIME_HELPERS, DISPATCH_TAILCALLS, DispatchTailCalls, NoSig)
+DEFINE_CLASS(SPAN_HELPERS, System, SpanHelpers)
+DEFINE_METHOD(SPAN_HELPERS, MEMSET, Fill, SM_RefByte_Byte_UIntPtr_RetVoid)
+DEFINE_METHOD(SPAN_HELPERS, MEMZERO, ClearWithoutReferences, SM_RefByte_UIntPtr_RetVoid)
+DEFINE_METHOD(SPAN_HELPERS, MEMCOPY, Memmove, SM_RefByte_RefByte_UIntPtr_RetVoid)
+
DEFINE_CLASS(UNSAFE, CompilerServices, Unsafe)
DEFINE_METHOD(UNSAFE, AS_POINTER, AsPointer, NoSig)
DEFINE_METHOD(UNSAFE, BYREF_IS_NULL, IsNullRef, NoSig)
diff --git a/src/coreclr/vm/debugdebugger.cpp b/src/coreclr/vm/debugdebugger.cpp
index bb5bed368de9..6077de0d8131 100644
--- a/src/coreclr/vm/debugdebugger.cpp
+++ b/src/coreclr/vm/debugdebugger.cpp
@@ -1152,9 +1152,18 @@ void DebugStackTrace::GetStackFramesFromException(OBJECTREF * e,
// to spot.
DWORD dwNativeOffset;
- if (cur.ip)
+ UINT_PTR ip = cur.ip;
+#if defined(DACCESS_COMPILE) && defined(TARGET_AMD64)
+ // Compensate for a bug in the old EH where, for a frame that faulted,
+ // the ip points to an address before the faulting instruction
+ if (g_isNewExceptionHandlingEnabled && (i == 0) && ((cur.flags & STEF_IP_ADJUSTED) == 0))
{
- EECodeInfo codeInfo(cur.ip);
+ ip -= 1;
+ }
+#endif // DACCESS_COMPILE && TARGET_AMD64
+ if (ip)
+ {
+ EECodeInfo codeInfo(ip);
dwNativeOffset = codeInfo.GetRelOffset();
}
else
@@ -1165,7 +1174,7 @@ void DebugStackTrace::GetStackFramesFromException(OBJECTREF * e,
pData->pElements[i].InitPass1(
dwNativeOffset,
pMD,
- (PCODE)cur.ip,
+ (PCODE)ip,
cur.flags);
#ifndef DACCESS_COMPILE
pData->pElements[i].InitPass2();
diff --git a/src/coreclr/vm/dispatchinfo.cpp b/src/coreclr/vm/dispatchinfo.cpp
index eb0c83f7a6ce..8b769c71bcc0 100644
--- a/src/coreclr/vm/dispatchinfo.cpp
+++ b/src/coreclr/vm/dispatchinfo.cpp
@@ -2578,10 +2578,9 @@ bool DispatchInfo::IsPropertyAccessorVisible(bool fIsSetter, OBJECTREF* pMemberI
// Check to see if the new method is a property accessor.
mdToken tkMember = mdTokenNil;
- MethodTable *pDeclaringMT = pMDForProperty->GetMethodTable();
- if (pMDForProperty->GetModule()->GetPropertyInfoForMethodDef(pMDForProperty->GetMemberDef(), &tkMember, NULL, NULL) == S_OK)
+ if (pMDForProperty->GetMDImport()->GetPropertyInfoForMethodDef(pMDForProperty->GetMemberDef(), &tkMember, NULL, NULL) == S_OK)
{
- if (IsMemberVisibleFromCom(pDeclaringMT, tkMember, pMDForProperty->GetMemberDef()))
+ if (IsMemberVisibleFromCom(pMDForProperty->GetMethodTable(), tkMember, pMDForProperty->GetMemberDef()))
return true;
}
}
diff --git a/src/coreclr/vm/dllimport.cpp b/src/coreclr/vm/dllimport.cpp
index bc106bf0f43d..12ca187ecbea 100644
--- a/src/coreclr/vm/dllimport.cpp
+++ b/src/coreclr/vm/dllimport.cpp
@@ -4264,8 +4264,7 @@ static void CreateNDirectStubAccessMetadata(
{
if (unmgdCallConv == CorInfoCallConvExtension::Managed ||
unmgdCallConv == CorInfoCallConvExtension::Fastcall ||
- unmgdCallConv == CorInfoCallConvExtension::FastcallMemberFunction ||
- unmgdCallConv == CorInfoCallConvExtension::Swift)
+ unmgdCallConv == CorInfoCallConvExtension::FastcallMemberFunction)
{
COMPlusThrow(kTypeLoadException, IDS_INVALID_PINVOKE_CALLCONV);
}
diff --git a/src/coreclr/vm/ecall.cpp b/src/coreclr/vm/ecall.cpp
index 37ac50d124f6..7a9538d8ea7d 100644
--- a/src/coreclr/vm/ecall.cpp
+++ b/src/coreclr/vm/ecall.cpp
@@ -96,7 +96,7 @@ void ECall::PopulateManagedStringConstructors()
INDEBUG(fInitialized = true);
}
-void ECall::PopulateManagedCastHelpers()
+void ECall::PopulateManagedHelpers()
{
STANDARD_VM_CONTRACT;
@@ -144,6 +144,18 @@ void ECall::PopulateManagedCastHelpers()
pMD = CoreLibBinder::GetMethod((BinderMethodID)(METHOD__CASTHELPERS__LDELEMAREF));
pDest = pMD->GetMultiCallableAddrOfCode();
SetJitHelperFunction(CORINFO_HELP_LDELEMA_REF, pDest);
+
+ pMD = CoreLibBinder::GetMethod((BinderMethodID)(METHOD__SPAN_HELPERS__MEMSET));
+ pDest = pMD->GetMultiCallableAddrOfCode();
+ SetJitHelperFunction(CORINFO_HELP_MEMSET, pDest);
+
+ pMD = CoreLibBinder::GetMethod((BinderMethodID)(METHOD__SPAN_HELPERS__MEMZERO));
+ pDest = pMD->GetMultiCallableAddrOfCode();
+ SetJitHelperFunction(CORINFO_HELP_MEMZERO, pDest);
+
+ pMD = CoreLibBinder::GetMethod((BinderMethodID)(METHOD__SPAN_HELPERS__MEMCOPY));
+ pDest = pMD->GetMultiCallableAddrOfCode();
+ SetJitHelperFunction(CORINFO_HELP_MEMCPY, pDest);
}
static CrstStatic gFCallLock;
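
The corelib.h binder entries above pin down the managed signatures these helper ids now bind to (for example, SM_RefByte_Byte_UIntPtr_RetVoid is a static void taking ref byte, byte, and UIntPtr). A sketch of those shapes, with assumed parameter names and empty bodies standing in for System.SpanHelpers:

internal static class SpanHelpersShape
{
    internal static void Fill(ref byte dest, byte value, nuint count) { }            // CORINFO_HELP_MEMSET
    internal static void ClearWithoutReferences(ref byte dest, nuint byteLength) { } // CORINFO_HELP_MEMZERO
    internal static void Memmove(ref byte dest, ref byte src, nuint byteCount) { }   // CORINFO_HELP_MEMCPY
}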
diff --git a/src/coreclr/vm/ecall.h b/src/coreclr/vm/ecall.h
index bc9d63ae4671..792eea633e8f 100644
--- a/src/coreclr/vm/ecall.h
+++ b/src/coreclr/vm/ecall.h
@@ -94,7 +94,7 @@ class ECall
static void PopulateManagedStringConstructors();
- static void PopulateManagedCastHelpers();
+ static void PopulateManagedHelpers();
#ifdef DACCESS_COMPILE
// Enumerates all gFCallMethods for minidumps.
diff --git a/src/coreclr/vm/ecalllist.h b/src/coreclr/vm/ecalllist.h
index 55b93f1f1ab6..9da9ebdb102a 100644
--- a/src/coreclr/vm/ecalllist.h
+++ b/src/coreclr/vm/ecalllist.h
@@ -471,8 +471,7 @@ FCFuncStart(gRuntimeHelpers)
FCFuncElement("PrepareDelegate", ReflectionInvocation::PrepareDelegate)
FCFuncElement("GetHashCode", ObjectNative::GetHashCode)
FCFuncElement("TryGetHashCode", ObjectNative::TryGetHashCode)
- FCFuncElement("Equals", ObjectNative::Equals)
- FCFuncElement("AllocateUninitializedClone", ObjectNative::AllocateUninitializedClone)
+ FCFuncElement("ContentEquals", ObjectNative::ContentEquals)
FCFuncElement("EnsureSufficientExecutionStack", ReflectionInvocation::EnsureSufficientExecutionStack)
FCFuncElement("TryEnsureSufficientExecutionStack", ReflectionInvocation::TryEnsureSufficientExecutionStack)
FCFuncElement("AllocTailCallArgBuffer", TailCallHelp::AllocTailCallArgBuffer)
diff --git a/src/coreclr/vm/eetwain.cpp b/src/coreclr/vm/eetwain.cpp
index b0886fcadebe..597fda3d45b5 100644
--- a/src/coreclr/vm/eetwain.cpp
+++ b/src/coreclr/vm/eetwain.cpp
@@ -4072,6 +4072,7 @@ bool UnwindEbpDoubleAlignFrame(
// Set baseSP as initial SP
baseSP += GetPushedArgSize(info, table, curOffs);
+#ifdef UNIX_X86_ABI
// 16-byte stack alignment padding (allocated in genFuncletProlog)
// Current funclet frame layout (see CodeGen::genFuncletProlog() and genFuncletEpilog()):
// prolog: sub esp, 12
@@ -4082,6 +4083,7 @@ bool UnwindEbpDoubleAlignFrame(
const TADDR funcletStart = pCodeInfo->GetJitManager()->GetFuncletStartAddress(pCodeInfo);
if (funcletStart != pCodeInfo->GetCodeAddress() && methodStart[pCodeInfo->GetRelOffset()] != X86_INSTR_RETN)
baseSP += 12;
+#endif
pContext->PCTAddr = baseSP;
pContext->ControlPC = *PTR_PCODE(pContext->PCTAddr);
@@ -4578,6 +4580,7 @@ bool EECodeManager::EnumGcRefs( PREGDISPLAY pContext,
#endif // _DEBUG
+#ifndef FEATURE_EH_FUNCLETS
/* What kind of a frame is this ? */
FrameType frameType = FR_NORMAL;
@@ -4622,6 +4625,7 @@ bool EECodeManager::EnumGcRefs( PREGDISPLAY pContext,
&info);
}
}
+#endif
bool willContinueExecution = !(flags & ExecutionAborted);
unsigned pushedSize = 0;
@@ -4712,16 +4716,45 @@ bool EECodeManager::EnumGcRefs( PREGDISPLAY pContext,
{
_ASSERTE(willContinueExecution);
+#ifdef FEATURE_EH_FUNCLETS
+ // Funclets' frame pointers (EBP) are always restored so they can access the main function's local variables.
+ // Therefore the value of EBP is invalid for the unwinder, so we should use ESP instead.
+ // See UnwindStackFrame for details.
+ if (pCodeInfo->IsFunclet())
+ {
+ PTR_CBYTE methodStart = PTR_CBYTE(pCodeInfo->GetSavedMethodCode());
+ TADDR baseSP = ESP;
+ // Set baseSP as initial SP
+ baseSP += GetPushedArgSize(&info, table, curOffs);
+
+#ifdef UNIX_X86_ABI
+ // 16-byte stack alignment padding (allocated in genFuncletProlog)
+ // Current funclet frame layout (see CodeGen::genFuncletProlog() and genFuncletEpilog()):
+ // prolog: sub esp, 12
+ // epilog: add esp, 12
+ // ret
+ // SP alignment padding should be added for all instructions except the first one and the last one.
+ // Epilog may not exist (unreachable), so we need to check the instruction code.
+ const PTR_CBYTE funcletStart = PTR_CBYTE(pCodeInfo->GetJitManager()->GetFuncletStartAddress(pCodeInfo));
+ if (funcletStart != methodStart + curOffs && methodStart[curOffs] != X86_INSTR_RETN)
+ baseSP += 12;
+#endif
+
+ // -sizeof(void*) because we want to point *AT* first parameter
+ pPendingArgFirst = (DWORD *)(size_t)(baseSP - sizeof(void*));
+ }
+#else // FEATURE_EH_FUNCLETS
if (info.handlers)
{
// -sizeof(void*) because we want to point *AT* first parameter
pPendingArgFirst = (DWORD *)(size_t)(baseSP - sizeof(void*));
}
+#endif
else if (info.localloc)
{
- baseSP = *(DWORD *)(size_t)(EBP - GetLocallocSPOffset(&info));
+ TADDR locallocBaseSP = *(DWORD *)(size_t)(EBP - GetLocallocSPOffset(&info));
// -sizeof(void*) because we want to point *AT* first parameter
- pPendingArgFirst = (DWORD *)(size_t) (baseSP - sizeof(void*));
+ pPendingArgFirst = (DWORD *)(size_t) (locallocBaseSP - sizeof(void*));
}
else
{
diff --git a/src/coreclr/vm/excep.cpp b/src/coreclr/vm/excep.cpp
index bc09fd73a469..6e82507cfb7b 100644
--- a/src/coreclr/vm/excep.cpp
+++ b/src/coreclr/vm/excep.cpp
@@ -2889,7 +2889,7 @@ VOID DECLSPEC_NORETURN RealCOMPlusThrow(Object *exceptionObj)
CONTRACTL_END;
OBJECTREF throwable = ObjectToOBJECTREF(exceptionObj);
- RealCOMPlusThrow(throwable, FALSE);
+ RealCOMPlusThrowWorker(throwable, FALSE);
}
#endif // USE_CHECKED_OBJECTREFS
@@ -6292,9 +6292,6 @@ EXTERN_C void JIT_StackProbe_End();
#ifdef FEATURE_EH_FUNCLETS
#ifndef TARGET_X86
-EXTERN_C void JIT_MemSet_End();
-EXTERN_C void JIT_MemCpy_End();
-
EXTERN_C void JIT_WriteBarrier_End();
EXTERN_C void JIT_CheckedWriteBarrier_End();
EXTERN_C void JIT_ByRefWriteBarrier_End();
@@ -6345,9 +6342,6 @@ bool IsIPInMarkedJitHelper(UINT_PTR uControlPc)
if (GetEEFuncEntryPoint(name) <= uControlPc && uControlPc < GetEEFuncEntryPoint(name##_End)) return true;
#ifndef TARGET_X86
- CHECK_RANGE(JIT_MemSet)
- CHECK_RANGE(JIT_MemCpy)
-
CHECK_RANGE(JIT_WriteBarrier)
CHECK_RANGE(JIT_CheckedWriteBarrier)
CHECK_RANGE(JIT_ByRefWriteBarrier)
@@ -7830,7 +7824,7 @@ VOID DECLSPEC_NORETURN UnwindAndContinueRethrowHelperAfterCatch(Frame* pEntryFra
}
else
{
- DispatchManagedException(orThrowable);
+ DispatchManagedException(orThrowable, /* preserveStackTrace */ false);
}
}
else
diff --git a/src/coreclr/vm/exceptionhandling.cpp b/src/coreclr/vm/exceptionhandling.cpp
index 4cf5bb3ad4b0..a6118ef56bca 100644
--- a/src/coreclr/vm/exceptionhandling.cpp
+++ b/src/coreclr/vm/exceptionhandling.cpp
@@ -19,6 +19,7 @@
#include "corinfo.h"
#include "exceptionhandlingqcalls.h"
#include "exinfo.h"
+#include "configuration.h"
#if defined(TARGET_X86)
#define USE_CURRENT_CONTEXT_IN_FILTER
@@ -236,7 +237,7 @@ void InitializeExceptionHandling()
// Initialize the lock used for synchronizing access to the stacktrace in the exception object
g_StackTraceArrayLock.Init(LOCK_TYPE_DEFAULT, TRUE);
- g_isNewExceptionHandlingEnabled = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableNewExceptionHandling) != 0;
+    g_isNewExceptionHandlingEnabled = Configuration::GetKnobBooleanValue(W("System.Runtime.LegacyExceptionHandling"), CLRConfig::EXTERNAL_LegacyExceptionHandling) == 0;
#ifdef TARGET_UNIX
// Register handler of hardware exceptions like null reference in PAL
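Note the inverted polarity here: the runtime now defaults to the new exception handling engine and callers opt *out* via a legacy knob, rather than opting in via `EnableNewExceptionHandling`. A hedged sketch of how such an `EXTERNAL_` knob is conventionally surfaced (assumption: standard CLR config conventions; verify exact names against the release notes):

```C#
// Illustrative only; knob names taken from the change above.
// EXTERNAL_* CLRConfig knobs are conventionally read from DOTNET_-prefixed
// environment variables, and Configuration::GetKnobBooleanValue also consults
// runtimeconfig.json configProperties:
//
//   DOTNET_LegacyExceptionHandling=1
//   "configProperties": { "System.Runtime.LegacyExceptionHandling": true }
```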
@@ -939,7 +940,7 @@ ProcessCLRExceptionNew(IN PEXCEPTION_RECORD pExceptionRecord,
else
{
OBJECTREF oref = ExceptionTracker::CreateThrowable(pExceptionRecord, FALSE);
- DispatchManagedException(oref, pContextRecord);
+ DispatchManagedException(oref, pContextRecord, /* preserveStackTrace */ false);
}
}
#endif // !HOST_UNIX
@@ -5451,7 +5452,7 @@ BOOL HandleHardwareException(PAL_SEHException* ex)
if (ex->GetExceptionRecord()->ExceptionCode != STATUS_BREAKPOINT && ex->GetExceptionRecord()->ExceptionCode != STATUS_SINGLE_STEP)
{
// A hardware exception is handled only if it happened in a jitted code or
- // in one of the JIT helper functions (JIT_MemSet, ...)
+ // in one of the JIT helper functions
PCODE controlPc = GetIP(ex->GetContextRecord());
if (ExecutionManager::IsManagedCode(controlPc) && IsGcMarker(ex->GetContextRecord(), ex->GetExceptionRecord()))
{
@@ -6623,6 +6624,13 @@ bool ExceptionTracker::IsInStackRegionUnwoundBySpecifiedException(CrawlFrame * p
// Remember that sfLowerBound and sfUpperBound are in the "OS format".
// Refer to the comment for CallerStackFrame for more information.
+
+ if (g_isNewExceptionHandlingEnabled)
+ {
+ // The new exception handling sets the ranges always to the SP of the unwound frame
+ return (sfLowerBound < csfToCheck) && (csfToCheck <= sfUpperBound);
+ }
+
#ifndef STACK_RANGE_BOUNDS_ARE_CALLER_SP
if ((sfLowerBound < csfToCheck) && (csfToCheck <= sfUpperBound))
#else // !STACK_RANGE_BOUNDS_ARE_CALLER_SP
@@ -7578,15 +7586,8 @@ extern "C" void QCALLTYPE AppendExceptionStackFrame(QCall::ObjectHandleOnStack e
_ASSERTE(pMD == codeInfo.GetMethodDesc());
#endif // _DEBUG
- // Compensate for a bug in the old EH that doesn't mark faulting instructions as faulting. The VS expects that behavior.
- bool hasFaulted = pExInfo->m_frameIter.m_crawl.HasFaulted();
- if (hasFaulted)
- {
- pExInfo->m_frameIter.m_crawl.hasFaulted = false;
- }
pExInfo->m_StackTraceInfo.AppendElement(canAllocateMemory, ip, sp, pMD, &pExInfo->m_frameIter.m_crawl);
pExInfo->m_StackTraceInfo.SaveStackTrace(canAllocateMemory, pExInfo->m_hThrowable, /*bReplaceStack*/FALSE, /*bSkipLastElement*/FALSE);
- pExInfo->m_frameIter.m_crawl.hasFaulted = hasFaulted;
}
// Notify the debugger that we are on the first pass for a managed exception.
@@ -7976,7 +7977,7 @@ struct ExtendedEHClauseEnumerator : EH_CLAUSE_ENUMERATOR
unsigned EHCount;
};
-extern "C" BOOL QCALLTYPE EHEnumInitFromStackFrameIterator(StackFrameIterator *pFrameIter, BYTE** pMethodStartAddress, EH_CLAUSE_ENUMERATOR * pEHEnum)
+extern "C" BOOL QCALLTYPE EHEnumInitFromStackFrameIterator(StackFrameIterator *pFrameIter, IJitManager::MethodRegionInfo* pMethodRegionInfo, EH_CLAUSE_ENUMERATOR * pEHEnum)
{
QCALL_CONTRACT;
@@ -7990,7 +7991,7 @@ extern "C" BOOL QCALLTYPE EHEnumInitFromStackFrameIterator(StackFrameIterator *p
IJitManager* pJitMan = pFrameIter->m_crawl.GetJitManager();
const METHODTOKEN& MethToken = pFrameIter->m_crawl.GetMethodToken();
- *pMethodStartAddress = (BYTE*)pJitMan->JitTokenToStartAddress(MethToken);
+ pJitMan->JitTokenToMethodRegionInfo(MethToken, pMethodRegionInfo);
pExtendedEHEnum->EHCount = pJitMan->InitializeEHEnumeration(MethToken, pEHEnum);
END_QCALL;
@@ -8210,18 +8211,6 @@ extern "C" bool QCALLTYPE SfiInit(StackFrameIterator* pThis, CONTEXT* pStackwalk
NotifyExceptionPassStarted(pThis, pThread, pExInfo);
- if (pFrame == FRAME_TOP)
- {
- // There are no managed frames on the stack, fail fast and report unhandled exception
- LONG disposition = InternalUnhandledExceptionFilter_Worker((EXCEPTION_POINTERS *)&pExInfo->m_ptrs);
-#ifdef HOST_WINDOWS
- CreateCrashDumpIfEnabled(/* fSOException */ FALSE);
- RaiseFailFastException(pExInfo->m_ptrs.ExceptionRecord, NULL, 0);
-#else
- CrashDumpAndTerminateProcess(pExInfo->m_ExceptionCode);
-#endif
- }
-
REGDISPLAY* pRD = &pExInfo->m_regDisplay;
pThread->FillRegDisplay(pRD, pStackwalkCtx);
@@ -8291,6 +8280,18 @@ extern "C" bool QCALLTYPE SfiInit(StackFrameIterator* pThis, CONTEXT* pStackwalk
*pfIsExceptionIntercepted = CheckExceptionInterception(pThis, pExInfo);
}
+ else
+ {
+ // There are no managed frames on the stack, fail fast and report unhandled exception
+ LONG disposition = InternalUnhandledExceptionFilter_Worker((EXCEPTION_POINTERS *)&pExInfo->m_ptrs);
+#ifdef HOST_WINDOWS
+ CreateCrashDumpIfEnabled(/* fSOException */ FALSE);
+ GetThread()->SetThreadStateNC(Thread::TSNC_ProcessedUnhandledException);
+ RaiseException(pExInfo->m_ExceptionCode, EXCEPTION_NONCONTINUABLE_EXCEPTION, pExInfo->m_ptrs.ExceptionRecord->NumberParameters, pExInfo->m_ptrs.ExceptionRecord->ExceptionInformation);
+#else
+ CrashDumpAndTerminateProcess(pExInfo->m_ExceptionCode);
+#endif
+ }
return result;
}
diff --git a/src/coreclr/vm/exceptionhandlingqcalls.h b/src/coreclr/vm/exceptionhandlingqcalls.h
index 7747c14f531d..7054080cef3c 100644
--- a/src/coreclr/vm/exceptionhandlingqcalls.h
+++ b/src/coreclr/vm/exceptionhandlingqcalls.h
@@ -17,7 +17,7 @@ extern "C" void QCALLTYPE CallFinallyFunclet(BYTE* pHandlerIP, REGDISPLAY* pvReg
extern "C" BOOL QCALLTYPE CallFilterFunclet(QCall::ObjectHandleOnStack exceptionObj, BYTE* pFilterP, REGDISPLAY* pvRegDisplay);
extern "C" void QCALLTYPE ResumeAtInterceptionLocation(REGDISPLAY* pvRegDisplay);
extern "C" void QCALLTYPE AppendExceptionStackFrame(QCall::ObjectHandleOnStack exceptionObj, SIZE_T ip, SIZE_T sp, int flags, ExInfo *pExInfo);
-extern "C" BOOL QCALLTYPE EHEnumInitFromStackFrameIterator(StackFrameIterator *pFrameIter, BYTE** pMethodStartAddress, EH_CLAUSE_ENUMERATOR * pEHEnum);
+extern "C" BOOL QCALLTYPE EHEnumInitFromStackFrameIterator(StackFrameIterator *pFrameIter, IJitManager::MethodRegionInfo *pMethodRegionInfo, EH_CLAUSE_ENUMERATOR * pEHEnum);
extern "C" BOOL QCALLTYPE EHEnumNext(EH_CLAUSE_ENUMERATOR* pEHEnum, RhEHClause* pEHClause);
extern "C" bool QCALLTYPE SfiInit(StackFrameIterator* pThis, CONTEXT* pStackwalkCtx, bool instructionFault, bool* pIsExceptionIntercepted);
extern "C" bool QCALLTYPE SfiNext(StackFrameIterator* pThis, unsigned int* uExCollideClauseIdx, bool* fUnwoundReversePInvoke, bool* pIsExceptionIntercepted);
diff --git a/src/coreclr/vm/interoputil.cpp b/src/coreclr/vm/interoputil.cpp
index d9e5d4375962..7a91cd41f7c1 100644
--- a/src/coreclr/vm/interoputil.cpp
+++ b/src/coreclr/vm/interoputil.cpp
@@ -2558,7 +2558,7 @@ BOOL IsMethodVisibleFromCom(MethodDesc *pMD)
mdMethodDef md = pMD->GetMemberDef();
// See if there is property information for this member.
- hr = pMD->GetModule()->GetPropertyInfoForMethodDef(md, &pd, &pPropName, &uSemantic);
+ hr = pMD->GetMDImport()->GetPropertyInfoForMethodDef(md, &pd, &pPropName, &uSemantic);
IfFailThrow(hr);
if (hr == S_OK)
diff --git a/src/coreclr/vm/jithelpers.cpp b/src/coreclr/vm/jithelpers.cpp
index 450752ae3677..1da021149602 100644
--- a/src/coreclr/vm/jithelpers.cpp
+++ b/src/coreclr/vm/jithelpers.cpp
@@ -3596,6 +3596,14 @@ NOINLINE HCIMPL3(CORINFO_MethodPtr, JIT_VirtualFunctionPointer_Framed, Object *
}
HCIMPLEND
+HCIMPL3(void, Jit_NativeMemSet, void* pDest, int value, size_t length)
+{
+ _ASSERTE(pDest != nullptr);
+ FCALL_CONTRACT;
+ memset(pDest, value, length);
+}
+HCIMPLEND
+
HCIMPL1(Object*, JIT_GetRuntimeFieldStub, CORINFO_FIELD_HANDLE field)
{
FCALL_CONTRACT;
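With the per-architecture `JIT_MemSet`/`JIT_MemCpy` assembly stubs deleted elsewhere in this diff, `Jit_NativeMemSet` is a thin wrapper over the C runtime's `memset`. A minimal C# sketch of the semantics the helper provides (illustrative only, not a runtime API; requires compiling with `/unsafe`):

```C#
using System;
using System.Runtime.InteropServices;

class MemSetSketch
{
    static unsafe void Main()
    {
        byte* p = (byte*)NativeMemory.Alloc(16);
        // Same effect as Jit_NativeMemSet(p, 0xFF, 16): fill 16 bytes with 0xFF.
        new Span<byte>(p, 16).Fill(0xFF);
        Console.WriteLine(p[15]); // 255
        NativeMemory.Free(p);
    }
}
```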
diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp
index aa756361340b..5e6b0cbeeafd 100644
--- a/src/coreclr/vm/jitinterface.cpp
+++ b/src/coreclr/vm/jitinterface.cpp
@@ -10689,7 +10689,10 @@ void* CEEJitInfo::getHelperFtn(CorInfoHelpFunc ftnNum, /* IN */
dynamicFtnNum == DYNAMIC_CORINFO_HELP_CHKCASTCLASS_SPECIAL ||
dynamicFtnNum == DYNAMIC_CORINFO_HELP_UNBOX ||
dynamicFtnNum == DYNAMIC_CORINFO_HELP_ARRADDR_ST ||
- dynamicFtnNum == DYNAMIC_CORINFO_HELP_LDELEMA_REF)
+ dynamicFtnNum == DYNAMIC_CORINFO_HELP_LDELEMA_REF ||
+ dynamicFtnNum == DYNAMIC_CORINFO_HELP_MEMSET ||
+ dynamicFtnNum == DYNAMIC_CORINFO_HELP_MEMZERO ||
+ dynamicFtnNum == DYNAMIC_CORINFO_HELP_MEMCPY)
{
Precode* pPrecode = Precode::GetPrecodeFromEntryPoint((PCODE)hlpDynamicFuncTable[dynamicFtnNum].pfnHelper);
_ASSERTE(pPrecode->GetType() == PRECODE_FIXUP);
diff --git a/src/coreclr/vm/jitinterface.h b/src/coreclr/vm/jitinterface.h
index 63666b46552c..bbca5c355fbb 100644
--- a/src/coreclr/vm/jitinterface.h
+++ b/src/coreclr/vm/jitinterface.h
@@ -400,9 +400,6 @@ extern "C"
#endif // TARGET_AMD64 || TARGET_ARM
- void STDCALL JIT_MemSet(void *dest, int c, SIZE_T count);
- void STDCALL JIT_MemCpy(void *dest, const void *src, SIZE_T count);
-
void STDMETHODCALLTYPE JIT_ProfilerEnterLeaveTailcallStub(UINT_PTR ProfilerHandle);
#if !defined(TARGET_ARM64) && !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64)
void STDCALL JIT_StackProbe();
diff --git a/src/coreclr/vm/loongarch64/crthelpers.S b/src/coreclr/vm/loongarch64/crthelpers.S
deleted file mode 100644
index 88fd21938fda..000000000000
--- a/src/coreclr/vm/loongarch64/crthelpers.S
+++ /dev/null
@@ -1,37 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-#include "unixasmmacros.inc"
-
-// JIT_MemSet/JIT_MemCpy
-//
-// It is IMPORTANT that the exception handling code is able to find these guys
-// on the stack, but on non-windows platforms we can just defer to the platform
-// implementation.
-//
-LEAF_ENTRY JIT_MemSet, _TEXT
- beq $a2, $zero, LOCAL_LABEL(JIT_MemSet_ret)
-
- ld.b $zero, $a0, 0 //Is this really needed ?
-
- b memset
-
-LOCAL_LABEL(JIT_MemSet_ret):
- jirl $r0, $ra, 0
-
-////NOTO: Here must use LEAF_END_MARKED! not LEAF_END !!!
-LEAF_END_MARKED JIT_MemSet, _TEXT
-
-LEAF_ENTRY JIT_MemCpy, _TEXT
- beq $a2, $zero, LOCAL_LABEL(JIT_MemCpy_ret)
-
- ld.b $zero, $a0, 0
- ld.b $zero, $a1, 0 //Is this really needed ?
-
- b memcpy
-
-LOCAL_LABEL(JIT_MemCpy_ret):
- jirl $r0, $ra, 0
-
-////NOTO: Here must use LEAF_END_MARKED! not LEAF_END !!!
-LEAF_END_MARKED JIT_MemCpy, _TEXT
diff --git a/src/coreclr/vm/loongarch64/stubs.cpp b/src/coreclr/vm/loongarch64/stubs.cpp
index da93bd587ed3..052d71ebc1e4 100644
--- a/src/coreclr/vm/loongarch64/stubs.cpp
+++ b/src/coreclr/vm/loongarch64/stubs.cpp
@@ -1928,7 +1928,7 @@ PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator,
}
}
- _ASSERTE(indirectionsDataSize == dataOffset);
+ _ASSERTE((indirectionsDataSize ? indirectionsDataSize : codeSize) == dataOffset);
// No null test required
if (!pLookup->testForNull)
diff --git a/src/coreclr/vm/metasig.h b/src/coreclr/vm/metasig.h
index 45cb5700db52..182acc55e643 100644
--- a/src/coreclr/vm/metasig.h
+++ b/src/coreclr/vm/metasig.h
@@ -237,6 +237,9 @@ DEFINE_METASIG(SM(PtrSByt_RetInt, P(B), i))
DEFINE_METASIG(SM(IntPtr_RetIntPtr, I, I))
DEFINE_METASIG(SM(UIntPtr_RetIntPtr, U, I))
DEFINE_METASIG(SM(PtrByte_PtrByte_Int_RetVoid, P(b) P(b) i, v))
+DEFINE_METASIG(SM(RefByte_RefByte_UIntPtr_RetVoid, r(b) r(b) U, v))
+DEFINE_METASIG(SM(RefByte_Byte_UIntPtr_RetVoid, r(b) b U, v))
+DEFINE_METASIG(SM(RefByte_UIntPtr_RetVoid, r(b) U, v))
DEFINE_METASIG(SM(PtrVoid_Byte_UInt_RetVoid, P(v) b K, v))
DEFINE_METASIG(SM(RefObj_IntPtr_RetVoid, r(j) I, v))
DEFINE_METASIG(SM(RefObj_RefIntPtr_RetVoid, r(j) r(I), v))
diff --git a/src/coreclr/vm/profdetach.cpp b/src/coreclr/vm/profdetach.cpp
index bf138209ce6a..09f11458c388 100644
--- a/src/coreclr/vm/profdetach.cpp
+++ b/src/coreclr/vm/profdetach.cpp
@@ -326,7 +326,7 @@ void ProfilingAPIDetach::ExecuteEvacuationLoop()
{
CRITSEC_Holder csh(ProfilingAPIUtility::GetStatusCrst());
- for (SIZE_T pos = 0; pos < s_profilerDetachInfos.Size(); ++pos)
+ while (s_profilerDetachInfos.Size() > 0)
{
ProfilerDetachInfo current = s_profilerDetachInfos.Pop();
diff --git a/src/coreclr/vm/qcallentrypoints.cpp b/src/coreclr/vm/qcallentrypoints.cpp
index 8e6a67029794..2d6466fc7d7c 100644
--- a/src/coreclr/vm/qcallentrypoints.cpp
+++ b/src/coreclr/vm/qcallentrypoints.cpp
@@ -324,6 +324,7 @@ static const Entry s_QCall[] =
DllImportEntry(GetFileLoadExceptionMessage)
DllImportEntry(FileLoadException_GetMessageForHR)
DllImportEntry(Interlocked_MemoryBarrierProcessWide)
+ DllImportEntry(ObjectNative_AllocateUninitializedClone)
DllImportEntry(Monitor_Wait)
DllImportEntry(Monitor_Pulse)
DllImportEntry(Monitor_PulseAll)
diff --git a/src/coreclr/vm/riscv64/crthelpers.S b/src/coreclr/vm/riscv64/crthelpers.S
deleted file mode 100644
index 3151387b3caf..000000000000
--- a/src/coreclr/vm/riscv64/crthelpers.S
+++ /dev/null
@@ -1,36 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-#include "unixasmmacros.inc"
-
-// JIT_MemSet/JIT_MemCpy
-//
-// It is IMPORTANT that the exception handling code is able to find these guys
-// on the stack, but on non-windows platforms we can just defer to the platform
-// implementation.
-//
-LEAF_ENTRY JIT_MemSet, _TEXT
- beq a2, zero, LOCAL_LABEL(JIT_MemSet_ret)
-
- lb zero, 0(a0) // Is this really needed ?
-
- tail memset
-
-LOCAL_LABEL(JIT_MemSet_ret):
- ret
-LEAF_END_MARKED JIT_MemSet, _TEXT
-
-////NOTE: Here must use LEAF_END_MARKED! not LEAF_END !!!
-LEAF_ENTRY JIT_MemCpy, _TEXT
- beq a2, zero, LOCAL_LABEL(JIT_MemCpy_ret)
-
- lb zero, 0(a0)
- lb zero, 0(a1) // Is this really needed ?
-
- tail memcpy
-
-LOCAL_LABEL(JIT_MemCpy_ret):
- ret
-
-////NOTE: Here must use LEAF_END_MARKED! not LEAF_END !!!
-LEAF_END_MARKED JIT_MemCpy, _TEXT
diff --git a/src/coreclr/vm/threads.cpp b/src/coreclr/vm/threads.cpp
index 081f5c6cf7a7..15b3df1f6ae1 100644
--- a/src/coreclr/vm/threads.cpp
+++ b/src/coreclr/vm/threads.cpp
@@ -8276,12 +8276,7 @@ void Thread::InitializeSpecialUserModeApc()
return;
}
- // In the future, once code paths using the special user-mode APC get some bake time, it should be used regardless of
- // whether CET shadow stacks are enabled
- if (AreCetShadowStacksEnabled())
- {
- s_pfnQueueUserAPC2Proc = pfnQueueUserAPC2Proc;
- }
+ s_pfnQueueUserAPC2Proc = pfnQueueUserAPC2Proc;
}
#endif // FEATURE_SPECIAL_USER_MODE_APC
diff --git a/src/installer/Directory.Build.targets b/src/installer/Directory.Build.targets
index dccb8277ba76..c4e8a8c8fb70 100644
--- a/src/installer/Directory.Build.targets
+++ b/src/installer/Directory.Build.targets
@@ -1,7 +1,7 @@
<Project>
<PropertyGroup>
- <InstallerName Condition="'$(PgoInstrument)' != ''">$(InstallerName)-pgo</InstallerName>
+ <InstallerName>$(InstallerName)</InstallerName>
<ArchiveName Condition="'$(PgoInstrument)' != ''">$(ArchiveName)-pgo</ArchiveName>
</PropertyGroup>
diff --git a/src/installer/managed/Microsoft.NET.HostModel/Microsoft.NET.HostModel.csproj b/src/installer/managed/Microsoft.NET.HostModel/Microsoft.NET.HostModel.csproj
index 4b2f49ec4356..fec324d46ccc 100644
--- a/src/installer/managed/Microsoft.NET.HostModel/Microsoft.NET.HostModel.csproj
+++ b/src/installer/managed/Microsoft.NET.HostModel/Microsoft.NET.HostModel.csproj
@@ -4,7 +4,7 @@
<TargetFramework>netstandard2.0</TargetFramework>
<Description>Abstractions for modifying .NET host binaries</Description>
<IsShipping>false</IsShipping>
- <IsPackable Condition="'$(PgoInstrument)' == ''">true</IsPackable>
+ <IsPackable Condition="'$(BuildOnlyPgoInstrumentedAssets)' != 'true'">true</IsPackable>
<GeneratePackageOnBuild>true</GeneratePackageOnBuild>
<IncludeSymbols>true</IncludeSymbols>
<Serviceable>true</Serviceable>
diff --git a/src/installer/pkg/sfx/Directory.Build.props b/src/installer/pkg/sfx/Directory.Build.props
index b0711d7f7ac9..dbf349249cef 100644
--- a/src/installer/pkg/sfx/Directory.Build.props
+++ b/src/installer/pkg/sfx/Directory.Build.props
@@ -11,7 +11,7 @@
<DisableImplicitFrameworkReferences>true</DisableImplicitFrameworkReferences>
<UseRuntimePackageDisclaimer>true</UseRuntimePackageDisclaimer>
</PropertyGroup>
- <PropertyGroup Condition="'$(PgoInstrument)' != 'true'">
+ <PropertyGroup Condition="'$(BuildOnlyPgoInstrumentedAssets)' != 'true'">
<GenerateInstallers>true</GenerateInstallers>
<GenerateVSInsertionPackages>true</GenerateVSInsertionPackages>
</PropertyGroup>
diff --git a/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Crossgen2.sfxproj b/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Crossgen2.sfxproj
index 337a0ce1ebee..c3fd9eab4904 100644
--- a/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Crossgen2.sfxproj
+++ b/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Crossgen2.sfxproj
@@ -7,12 +7,13 @@
<SkipBuild Condition="'$(RuntimeFlavor)' == 'Mono'">true</SkipBuild>
<PlatformPackageType>ToolPack</PlatformPackageType>
<SharedFrameworkName>$(SharedFrameworkName).Crossgen2</SharedFrameworkName>
- <PgoSuffix Condition="'$(PgoInstrument)' != ''">.PGO</PgoSuffix>
<OverridePackageId>$(SharedFrameworkName)$(PgoSuffix).$(RuntimeIdentifier)</OverridePackageId>
<ArchiveName>dotnet-crossgen2</ArchiveName>
<SharedFrameworkHostFileNameOverride>crossgen2</SharedFrameworkHostFileNameOverride>
<!-- Build this pack for any RID if building from source. Otherwise, only build select RIDs. -->
- <RuntimeIdentifiers Condition="'$(DotNetBuildSourceOnly)' != 'true'">linux-x64;linux-musl-x64;linux-arm;linux-musl-arm;linux-arm64;linux-musl-arm64;freebsd-x64;freebsd-arm64;osx-x64;osx-arm64;win-x64;win-x86;win-arm64</RuntimeIdentifiers>
+ <RuntimeIdentifiers Condition="'$(DotNetBuildSourceOnly)' != 'true'">linux-x64;linux-musl-x64;linux-arm;linux-musl-arm;linux-arm64;linux-musl-arm64;osx-x64;osx-arm64;win-x64;win-x86;win-arm64</RuntimeIdentifiers>
+    <!-- FreeBSD runtime/apphost packs aren't built in the official build, so only reference the RIDs when targeting FreeBSD -->
+ <RuntimeIdentifiers Condition="'$(DotNetBuildSourceOnly)' != 'true' and '$(TargetOS)' == 'freebsd'">$(RuntimeIdentifiers);freebsd-x64;freebsd-arm64</RuntimeIdentifiers>
<GenerateInstallers>false</GenerateInstallers>
<HostJsonTargetPath>tools/</HostJsonTargetPath>
<PermitDllAndExeFilesLackingFileVersion>true</PermitDllAndExeFilesLackingFileVersion>
diff --git a/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Host.sfxproj b/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Host.sfxproj
index 8cd98f995ee0..fc7b8b90fe90 100644
--- a/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Host.sfxproj
+++ b/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Host.sfxproj
@@ -8,7 +8,7 @@
<ArchiveName>dotnet-apphost-pack</ArchiveName>
<InstallerName>dotnet-apphost-pack</InstallerName>
<VSInsertionShortComponentName>NetCore.AppHostPack</VSInsertionShortComponentName>
- <IsPackable Condition="'$(PgoInstrument)' != ''">false</IsPackable>
+ <IsPackable Condition="'$(BuildOnlyPgoInstrumentedAssets)' != ''">false</IsPackable>
</PropertyGroup>
<!--
diff --git a/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Ref.sfxproj b/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Ref.sfxproj
index 65ba460d088a..3580d2c54465 100644
--- a/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Ref.sfxproj
+++ b/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Ref.sfxproj
@@ -5,7 +5,7 @@
<PlatformPackageType>TargetingPack</PlatformPackageType>
<UseTemplatedPlatformManifest>true</UseTemplatedPlatformManifest>
<InstallerName>dotnet-targeting-pack</InstallerName>
- <IsPackable Condition="'$(PgoInstrument)' != ''">false</IsPackable>
+ <IsPackable Condition="'$(BuildOnlyPgoInstrumentedAssets)' != ''">false</IsPackable>
<VSInsertionShortComponentName>NetCore.TargetingPack</VSInsertionShortComponentName>
<PackageDescription>A set of .NET APIs that are included in the default .NET application model. Contains reference assemblies, documentation, and other design-time assets.</PackageDescription>
</PropertyGroup>
diff --git a/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Runtime.sfxproj b/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Runtime.sfxproj
index 1f6b55038026..3389feacd0c0 100644
--- a/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Runtime.sfxproj
+++ b/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Runtime.sfxproj
@@ -8,7 +8,7 @@
<InstallerName Condition="'$(TargetOS)' != 'osx'">dotnet-runtime</InstallerName>
<InstallerName Condition="'$(TargetOS)' == 'osx'">dotnet-runtime-internal</InstallerName>
<CreateSymbolsArchive Condition="'$(PgoInstrument)' == ''">true</CreateSymbolsArchive>
- <IsPackable Condition="'$(PgoInstrument)' != ''">false</IsPackable>
+ <IsPackable Condition="'$(BuildOnlyPgoInstrumentedAssets)' != ''">false</IsPackable>
<SymbolsArchiveName>dotnet-runtime-symbols</SymbolsArchiveName>
<VSInsertionShortComponentName>NetCore.SharedFramework</VSInsertionShortComponentName>
<UseTemplatedPlatformManifest>true</UseTemplatedPlatformManifest>
diff --git a/src/installer/pkg/sfx/bundle/Microsoft.NETCore.App.Bundle.bundleproj b/src/installer/pkg/sfx/bundle/Microsoft.NETCore.App.Bundle.bundleproj
index acfcdd089bbe..178a37fed055 100644
--- a/src/installer/pkg/sfx/bundle/Microsoft.NETCore.App.Bundle.bundleproj
+++ b/src/installer/pkg/sfx/bundle/Microsoft.NETCore.App.Bundle.bundleproj
@@ -19,7 +19,7 @@
</PropertyGroup>
<ItemGroup>
- <PackageReference Condition="'$(PgoInstrument)' != 'true'" Include="Microsoft.DotNet.Build.Tasks.Installers" Version="$(MicrosoftDotNetBuildTasksInstallersVersion)" />
+ <PackageReference Condition="'$(BuildOnlyPgoInstrumentedAssets)' != 'true'" Include="Microsoft.DotNet.Build.Tasks.Installers" Version="$(MicrosoftDotNetBuildTasksInstallersVersion)" />
<PackageReference Include="Microsoft.DotNet.Build.Tasks.Archives" Version="$(MicrosoftDotNetBuildTasksArchivesVersion)" />
</ItemGroup>
diff --git a/src/installer/prepare-artifacts.proj b/src/installer/prepare-artifacts.proj
index 5413e825c951..a9e0931c1160 100644
--- a/src/installer/prepare-artifacts.proj
+++ b/src/installer/prepare-artifacts.proj
@@ -23,6 +23,8 @@
</PropertyGroup>
<Import Project="../tools/Sign.props" Sdk="Microsoft.DotNet.Arcade.Sdk" />
+ <Import Project="$(RepositoryEngineeringDir)Publishing.props" Condition="Exists('$(RepositoryEngineeringDir)Publishing.props')" />
+
<UsingTask TaskName="GenerateChecksums" AssemblyFile="$(InstallerTasksAssemblyPath)" />
<PropertyGroup>
@@ -56,6 +58,16 @@
<ManifestBuildData Include="AzureDevOpsBranch=$(BUILD_SOURCEBRANCH)" />
</ItemGroup>
+ <!--
+ Set metadata for assets that are not marked as NonShipping.
+    This is used to determine if the asset should be shipped as part of a .NET release.
+ -->
+ <ItemDefinitionGroup>
+ <ItemsToPush>
+ <ManifestArtifactData Condition="'$(ProducesDotNetReleaseShippingAssets)' == 'true'">DotNetReleaseShipping=true</ManifestArtifactData>
+ </ItemsToPush>
+ </ItemDefinitionGroup>
+
<!--
Run Arcade's signing project directly. The 'eng/Signing.props' extensibility props file checks
if '$(<StageName>)' == 'true' and points Arcade to the correct files.
diff --git a/src/libraries/Common/src/Interop/OSX/System.Security.Cryptography.Native.Apple/Interop.SecKeyRef.macOS.cs b/src/libraries/Common/src/Interop/OSX/System.Security.Cryptography.Native.Apple/Interop.SecKeyRef.macOS.cs
index e4bca886045c..74f4ccd2fafb 100644
--- a/src/libraries/Common/src/Interop/OSX/System.Security.Cryptography.Native.Apple/Interop.SecKeyRef.macOS.cs
+++ b/src/libraries/Common/src/Interop/OSX/System.Security.Cryptography.Native.Apple/Interop.SecKeyRef.macOS.cs
@@ -3,6 +3,7 @@
using System;
using System.Diagnostics;
+using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Security.Cryptography;
using System.Security.Cryptography.Apple;
@@ -36,7 +37,7 @@ internal static partial class Interop
internal static SafeSecKeyRefHandle ImportEphemeralKey(ReadOnlySpan<byte> keyBlob, bool hasPrivateKey)
{
- Debug.Assert(keyBlob != null);
+ Debug.Assert(!Unsafe.IsNullRef(ref MemoryMarshal.GetReference(keyBlob)));
SafeSecKeyRefHandle keyHandle;
int osStatus;
diff --git a/src/libraries/Common/src/Microsoft/Win32/SafeHandles/SafeUnicodeStringHandle.cs b/src/libraries/Common/src/Microsoft/Win32/SafeHandles/SafeUnicodeStringHandle.cs
index e58bfd19695e..14d47033190c 100644
--- a/src/libraries/Common/src/Microsoft/Win32/SafeHandles/SafeUnicodeStringHandle.cs
+++ b/src/libraries/Common/src/Microsoft/Win32/SafeHandles/SafeUnicodeStringHandle.cs
@@ -2,6 +2,7 @@
// The .NET Foundation licenses this file to you under the MIT license.
using System;
+using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
namespace Microsoft.Win32.SafeHandles
@@ -23,12 +24,12 @@ namespace Microsoft.Win32.SafeHandles
public unsafe SafeUnicodeStringHandle(ReadOnlySpan<char> s)
: base(IntPtr.Zero, ownsHandle: true)
{
- // If s == default then the span represents the null string,
+ // If s contains a null ref then the span represents the null string,
// and handle should be IntPtr.Zero to match Marshal.StringToHGlobalUni.
//
// Since that was already done in the base ctor call, we only need to do
- // work when s != default.
- if (s != default)
+ // work when s does not contain a null ref.
+ if (!Unsafe.IsNullRef(ref MemoryMarshal.GetReference(s)))
{
int cch = checked(s.Length + 1);
int cb = checked(cch * sizeof(char));
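This hunk (and the `Pkcs12Kdf` one further down) replaces `span != default` comparisons with a null-reference probe, which distinguishes the null string from an empty one without tripping analyzer CA2265. A standalone sketch of the distinction:

```C#
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;

class SpanNullnessSketch
{
    // True only for a span whose reference is the null ref (the "null string"),
    // not for an empty-but-non-null span such as string.Empty.
    static bool RepresentsNullString(ReadOnlySpan<char> s) =>
        Unsafe.IsNullRef(ref MemoryMarshal.GetReference(s));

    static void Main()
    {
        Console.WriteLine(RepresentsNullString(default));         // True
        Console.WriteLine(RepresentsNullString(string.Empty));    // False
        Console.WriteLine(RepresentsNullString("abc".AsSpan(3))); // False: empty slice, non-null ref
    }
}
```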
diff --git a/src/libraries/Common/src/System/Net/Http/aspnetcore/Http2/Hpack/Huffman.cs b/src/libraries/Common/src/System/Net/Http/aspnetcore/Http2/Hpack/Huffman.cs
index b23b7e63529d..980bfbc35799 100644
--- a/src/libraries/Common/src/System/Net/Http/aspnetcore/Http2/Hpack/Huffman.cs
+++ b/src/libraries/Common/src/System/Net/Http/aspnetcore/Http2/Hpack/Huffman.cs
@@ -677,7 +677,7 @@ namespace System.Net.Http.HPack
// see comments in GenerateDecodingLookupTree() describing decoding table
Span<byte> dst = dstArray;
- Debug.Assert(dst != null && dst.Length > 0);
+ Debug.Assert(dst.Length > 0);
ushort[] decodingTree = s_decodingTree;
diff --git a/src/libraries/Common/src/System/Net/Security/CertificateValidation.OSX.cs b/src/libraries/Common/src/System/Net/Security/CertificateValidation.OSX.cs
index aee4b77b5083..b269a0fb70fa 100644
--- a/src/libraries/Common/src/System/Net/Security/CertificateValidation.OSX.cs
+++ b/src/libraries/Common/src/System/Net/Security/CertificateValidation.OSX.cs
@@ -14,7 +14,7 @@ namespace System.Net.Security
private static readonly IdnMapping s_idnMapping = new IdnMapping();
// WARNING: This function will do the verification using OpenSSL. If the intention is to use OS function, caller should use CertificatePal interface.
- internal static SslPolicyErrors BuildChainAndVerifyProperties(X509Chain chain, X509Certificate2 remoteCertificate, bool checkCertName, bool _ /*isServer*/, string? hostName, IntPtr certificateBuffer, int bufferLength = 0)
+ internal static SslPolicyErrors BuildChainAndVerifyProperties(X509Chain chain, X509Certificate2 remoteCertificate, bool checkCertName, bool _ /*isServer*/, string? hostName, Span<byte> certificateBuffer)
{
SslPolicyErrors errors = chain.Build(remoteCertificate) ?
SslPolicyErrors.None :
@@ -31,15 +31,24 @@ namespace System.Net.Security
}
SafeX509Handle certHandle;
- if (certificateBuffer != IntPtr.Zero && bufferLength > 0)
+ unsafe
{
- certHandle = Interop.Crypto.DecodeX509(certificateBuffer, bufferLength);
- }
- else
- {
- // We dont't have DER encoded buffer.
- byte[] der = remoteCertificate.Export(X509ContentType.Cert);
- certHandle = Interop.Crypto.DecodeX509(Marshal.UnsafeAddrOfPinnedArrayElement(der, 0), der.Length);
+ if (certificateBuffer.Length > 0)
+ {
+ fixed (byte* pCert = certificateBuffer)
+ {
+ certHandle = Interop.Crypto.DecodeX509((IntPtr)pCert, certificateBuffer.Length);
+ }
+ }
+ else
+ {
+                // We don't have a DER-encoded buffer.
+ byte[] der = remoteCertificate.Export(X509ContentType.Cert);
+ fixed (byte* pDer = der)
+ {
+ certHandle = Interop.Crypto.DecodeX509((IntPtr)pDer, der.Length);
+ }
+ }
}
int hostNameMatch;
diff --git a/src/libraries/Common/src/System/Net/Security/CertificateValidation.Unix.cs b/src/libraries/Common/src/System/Net/Security/CertificateValidation.Unix.cs
index 65a1adb492fa..da3cb38a8682 100644
--- a/src/libraries/Common/src/System/Net/Security/CertificateValidation.Unix.cs
+++ b/src/libraries/Common/src/System/Net/Security/CertificateValidation.Unix.cs
@@ -13,7 +13,7 @@ namespace System.Net.Security
private static readonly IdnMapping s_idnMapping = new IdnMapping();
#pragma warning disable IDE0060
- internal static SslPolicyErrors BuildChainAndVerifyProperties(X509Chain chain, X509Certificate2 remoteCertificate, bool checkCertName, bool isServer, string? hostName, IntPtr certificateBuffer, int bufferLength)
+ internal static SslPolicyErrors BuildChainAndVerifyProperties(X509Chain chain, X509Certificate2 remoteCertificate, bool checkCertName, bool isServer, string? hostName, Span<byte> certificateBuffer)
=> BuildChainAndVerifyProperties(chain, remoteCertificate, checkCertName, isServer, hostName);
#pragma warning restore IDE0060
diff --git a/src/libraries/Common/src/System/Net/Security/CertificateValidation.Windows.cs b/src/libraries/Common/src/System/Net/Security/CertificateValidation.Windows.cs
index d068015e534c..90be80c734cc 100644
--- a/src/libraries/Common/src/System/Net/Security/CertificateValidation.Windows.cs
+++ b/src/libraries/Common/src/System/Net/Security/CertificateValidation.Windows.cs
@@ -14,7 +14,7 @@ namespace System.Net
internal static partial class CertificateValidation
{
#pragma warning disable IDE0060
- internal static SslPolicyErrors BuildChainAndVerifyProperties(X509Chain chain, X509Certificate2 remoteCertificate, bool checkCertName, bool isServer, string? hostName, IntPtr certificateBuffer, int bufferLength)
+ internal static SslPolicyErrors BuildChainAndVerifyProperties(X509Chain chain, X509Certificate2 remoteCertificate, bool checkCertName, bool isServer, string? hostName, Span<byte> certificateBuffer)
=> BuildChainAndVerifyProperties(chain, remoteCertificate, checkCertName, isServer, hostName);
#pragma warning restore IDE0060
diff --git a/src/libraries/Common/src/System/Number.Parsing.Common.cs b/src/libraries/Common/src/System/Number.Parsing.Common.cs
index 16e9f777f346..e43cbe14c292 100644
--- a/src/libraries/Common/src/System/Number.Parsing.Common.cs
+++ b/src/libraries/Common/src/System/Number.Parsing.Common.cs
@@ -336,7 +336,7 @@ namespace System
private static unsafe TChar* MatchChars<TChar>(TChar* p, TChar* pEnd, ReadOnlySpan<TChar> value)
where TChar : unmanaged, IUtfChar<TChar>
{
- Debug.Assert((p != null) && (pEnd != null) && (p <= pEnd) && (value != null));
+ Debug.Assert((p != null) && (pEnd != null) && (p <= pEnd));
fixed (TChar* stringPointer = &MemoryMarshal.GetReference(value))
{
diff --git a/src/libraries/Common/src/System/Security/Cryptography/Pkcs12Kdf.cs b/src/libraries/Common/src/System/Security/Cryptography/Pkcs12Kdf.cs
index 8e482b931c76..1fa1d0ee0339 100644
--- a/src/libraries/Common/src/System/Security/Cryptography/Pkcs12Kdf.cs
+++ b/src/libraries/Common/src/System/Security/Cryptography/Pkcs12Kdf.cs
@@ -3,6 +3,8 @@
using System.Collections.Generic;
using System.Diagnostics;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
using System.Text;
namespace System.Security.Cryptography.Pkcs
@@ -115,9 +117,15 @@ namespace System.Security.Cryptography.Pkcs
// The password is a null-terminated UTF-16BE version of the input.
int passLen = checked((password.Length + 1) * 2);
- // If password == default then the span represents the null string (as opposed to
+ // If password contains a null ref then the span represents the null string (as opposed to
// an empty string), and the P block should then have size 0 in the next step.
+#if NETSTANDARD
+#pragma warning disable CA2265 // Do not compare Span<T> to 'default'
if (password == default)
+#pragma warning restore CA2265
+#else
+ if (Unsafe.IsNullRef(ref MemoryMarshal.GetReference(password)))
+#endif
{
passLen = 0;
}
diff --git a/src/libraries/Common/tests/System/Net/Configuration.Sockets.cs b/src/libraries/Common/tests/System/Net/Configuration.Sockets.cs
index f9f9ba1dc17e..761998370685 100644
--- a/src/libraries/Common/tests/System/Net/Configuration.Sockets.cs
+++ b/src/libraries/Common/tests/System/Net/Configuration.Sockets.cs
@@ -29,7 +29,7 @@ namespace System.Net.Test.Common
{
yield return new[] { IPAddress.Loopback };
}
- if (Socket.OSSupportsIPv6)
+ if (Socket.OSSupportsIPv6 && IsIPv6LoopbackAvailable)
{
yield return new[] { IPAddress.IPv6Loopback };
}
@@ -46,6 +46,23 @@ namespace System.Net.Test.Common
.Select(a => a.Address)
.Where(a => a.IsIPv6LinkLocal)
.FirstOrDefault();
+
+ private static readonly Lazy<bool> _isIPv6LoopbackAvailable = new Lazy<bool>(GetIsIPv6LoopbackAvailable);
+ public static bool IsIPv6LoopbackAvailable => _isIPv6LoopbackAvailable.Value;
+
+ private static bool GetIsIPv6LoopbackAvailable()
+ {
+ try
+ {
+ using Socket s = new Socket(AddressFamily.InterNetworkV6, SocketType.Dgram, ProtocolType.Udp);
+ s.Bind(new IPEndPoint(IPAddress.IPv6Loopback, 0));
+ return true;
+ }
+ catch (SocketException)
+ {
+ return false;
+ }
+ }
}
}
}
diff --git a/src/libraries/Microsoft.Extensions.DependencyInjection/src/ServiceLookup/CallSiteValidator.cs b/src/libraries/Microsoft.Extensions.DependencyInjection/src/ServiceLookup/CallSiteValidator.cs
index 433b53b5cebe..9ee79abbdd7f 100644
--- a/src/libraries/Microsoft.Extensions.DependencyInjection/src/ServiceLookup/CallSiteValidator.cs
+++ b/src/libraries/Microsoft.Extensions.DependencyInjection/src/ServiceLookup/CallSiteValidator.cs
@@ -41,18 +41,27 @@ namespace Microsoft.Extensions.DependencyInjection.ServiceLookup
// First, check if we have encountered this call site before to prevent visiting call site trees that have already been visited
// If firstScopedServiceInCallSiteTree is null there are no scoped dependencies in this service's call site tree
// If firstScopedServiceInCallSiteTree has a value, it contains the first scoped service in this service's call site tree
- if (_scopedServices.TryGetValue(callSite.Cache.Key, out Type? firstScopedServiceInCallSiteTree))
+ if (!_scopedServices.TryGetValue(callSite.Cache.Key, out Type? firstScopedServiceInCallSiteTree))
{
- return firstScopedServiceInCallSiteTree;
- }
+                // This call site wasn't cached yet; walk the tree
+ firstScopedServiceInCallSiteTree = base.VisitCallSite(callSite, argument);
- // Walk the tree
- Type? scoped = base.VisitCallSite(callSite, argument);
+ // Cache the result
+ _scopedServices[callSite.Cache.Key] = firstScopedServiceInCallSiteTree;
+ }
- // Store the result for each visited service
- _scopedServices[callSite.Cache.Key] = scoped;
+ // If there is a scoped service in the call site tree, make sure we are not resolving it from a singleton
+ if (firstScopedServiceInCallSiteTree != null && argument.Singleton != null)
+ {
+ throw new InvalidOperationException(SR.Format(SR.ScopedInSingletonException,
+ callSite.ServiceType,
+ argument.Singleton.ServiceType,
+ nameof(ServiceLifetime.Scoped).ToLowerInvariant(),
+ nameof(ServiceLifetime.Singleton).ToLowerInvariant()
+ ));
+ }
- return scoped;
+ return firstScopedServiceInCallSiteTree;
}
protected override Type? VisitConstructor(ConstructorCallSite constructorCallSite, CallSiteValidatorState state)
@@ -91,15 +100,6 @@ namespace Microsoft.Extensions.DependencyInjection.ServiceLookup
{
return null;
}
- if (state.Singleton != null)
- {
- throw new InvalidOperationException(SR.Format(SR.ScopedInSingletonException,
- scopedCallSite.ServiceType,
- state.Singleton.ServiceType,
- nameof(ServiceLifetime.Scoped).ToLowerInvariant(),
- nameof(ServiceLifetime.Singleton).ToLowerInvariant()
- ));
- }
VisitCallSiteMain(scopedCallSite, state);
return scopedCallSite.ServiceType;
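The check moves from `VisitScopeCache` into `VisitCallSite` so it also fires when a previously visited (cached) call site is reached through a second singleton. A minimal repro sketch of the error the validator now reports consistently (hypothetical `IFoo`/`IBar` types, mirroring the tests added below):

```C#
using System;
using Microsoft.Extensions.DependencyInjection;

interface IFoo { }
interface IBar { }
class Bar : IBar { }
class Foo : IFoo { public Foo(IBar bar) { } } // singleton consuming a scoped service

class ScopedInSingletonRepro
{
    static void Main()
    {
        var services = new ServiceCollection();
        services.AddScoped<IBar, Bar>();
        services.AddSingleton<IFoo, Foo>();

        try
        {
            // With both validations on, the scoped-in-singleton error surfaces
            // even when IFoo's call site was visited and cached earlier.
            services.BuildServiceProvider(new ServiceProviderOptions
            {
                ValidateOnBuild = true,
                ValidateScopes = true
            });
        }
        catch (AggregateException ex)
        {
            Console.WriteLine(ex.InnerExceptions[0].Message);
        }
    }
}
```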
diff --git a/src/libraries/Microsoft.Extensions.DependencyInjection/tests/DI.Tests/ServiceProviderValidationTests.cs b/src/libraries/Microsoft.Extensions.DependencyInjection/tests/DI.Tests/ServiceProviderValidationTests.cs
index a5ee249bd470..006dd15aa8da 100644
--- a/src/libraries/Microsoft.Extensions.DependencyInjection/tests/DI.Tests/ServiceProviderValidationTests.cs
+++ b/src/libraries/Microsoft.Extensions.DependencyInjection/tests/DI.Tests/ServiceProviderValidationTests.cs
@@ -240,6 +240,84 @@ namespace Microsoft.Extensions.DependencyInjection.Tests
}
[Fact]
+ public void BuildServiceProvider_ValidateOnBuild_Throws_WhenScopedIsInjectedIntoSingleton()
+ {
+ // Arrange
+ var serviceCollection = new ServiceCollection();
+ serviceCollection.AddScoped<IBar, Bar>();
+ serviceCollection.AddSingleton<IFoo, Foo>();
+
+ // Act + Assert
+ var aggregateException = Assert.Throws<AggregateException>(() => serviceCollection.BuildServiceProvider(new ServiceProviderOptions() { ValidateOnBuild = true, ValidateScopes = true }));
+ Assert.StartsWith("Some services are not able to be constructed", aggregateException.Message);
+ Assert.Equal(1, aggregateException.InnerExceptions.Count);
+ Assert.Equal("Error while validating the service descriptor 'ServiceType: Microsoft.Extensions.DependencyInjection.Tests.ServiceProviderValidationTests+IFoo Lifetime: Singleton ImplementationType: Microsoft.Extensions.DependencyInjection.Tests.ServiceProviderValidationTests+Foo': " +
+ "Cannot consume scoped service 'Microsoft.Extensions.DependencyInjection.Tests.ServiceProviderValidationTests+IBar' from singleton 'Microsoft.Extensions.DependencyInjection.Tests.ServiceProviderValidationTests+IFoo'."
+ , aggregateException.InnerExceptions[0].Message);
+ }
+
+ [Fact]
+ public void BuildServiceProvider_ValidateOnBuild_Throws_WhenScopedIsInjectedIntoSingleton_ReverseRegistrationOrder()
+ {
+ // Arrange
+ var serviceCollection = new ServiceCollection();
+ serviceCollection.AddSingleton<IFoo, Foo>();
+ serviceCollection.AddScoped<IBar, Bar>();
+
+ // Act + Assert
+ var aggregateException = Assert.Throws<AggregateException>(() => serviceCollection.BuildServiceProvider(new ServiceProviderOptions() { ValidateOnBuild = true, ValidateScopes = true }));
+ Assert.StartsWith("Some services are not able to be constructed", aggregateException.Message);
+ Assert.Equal(1, aggregateException.InnerExceptions.Count);
+ Assert.Equal("Error while validating the service descriptor 'ServiceType: Microsoft.Extensions.DependencyInjection.Tests.ServiceProviderValidationTests+IFoo Lifetime: Singleton ImplementationType: Microsoft.Extensions.DependencyInjection.Tests.ServiceProviderValidationTests+Foo': " +
+ "Cannot consume scoped service 'Microsoft.Extensions.DependencyInjection.Tests.ServiceProviderValidationTests+IBar' from singleton 'Microsoft.Extensions.DependencyInjection.Tests.ServiceProviderValidationTests+IFoo'."
+ , aggregateException.InnerExceptions[0].Message);
+ }
+
+ [Fact]
+ public void BuildServiceProvider_ValidateOnBuild_DoesNotThrow_WhenScopeFactoryIsInjectedIntoSingleton()
+ {
+ // Arrange
+ var serviceCollection = new ServiceCollection();
+ serviceCollection.AddSingleton<IBoo, Boo>();
+
+ // Act + Assert
+ serviceCollection.BuildServiceProvider(new ServiceProviderOptions() { ValidateOnBuild = true, ValidateScopes = true });
+ }
+
+ [Fact]
+ public void BuildServiceProvider_ValidateOnBuild_Throws_WhenScopedIsInjectedIntoSingleton_CachedCallSites()
+ {
+ // Arrange
+ var serviceCollection = new ServiceCollection();
+ serviceCollection.AddScoped<Foo>();
+ serviceCollection.AddSingleton<Foo2>();
+ serviceCollection.AddScoped<IBar, Bar2>();
+ serviceCollection.AddScoped<IBaz, Baz>();
+
+ // Act + Assert
+ var aggregateException = Assert.Throws<AggregateException>(() => serviceCollection.BuildServiceProvider(new ServiceProviderOptions() { ValidateOnBuild = true, ValidateScopes = true }));
+ Assert.StartsWith("Some services are not able to be constructed", aggregateException.Message);
+ Assert.Equal(1, aggregateException.InnerExceptions.Count);
+ Assert.Equal("Error while validating the service descriptor 'ServiceType: Microsoft.Extensions.DependencyInjection.Tests.ServiceProviderValidationTests+Foo2 Lifetime: Singleton ImplementationType: Microsoft.Extensions.DependencyInjection.Tests.ServiceProviderValidationTests+Foo2': " +
+ "Cannot consume scoped service 'Microsoft.Extensions.DependencyInjection.Tests.ServiceProviderValidationTests+IBar' from singleton 'Microsoft.Extensions.DependencyInjection.Tests.ServiceProviderValidationTests+Foo2'."
+ , aggregateException.InnerExceptions[0].Message);
+ }
+
+ [Fact]
+ public void BuildServiceProvider_ValidateOnBuild_DoesNotThrow_CachedCallSites()
+ {
+ // Arrange
+ var serviceCollection = new ServiceCollection();
+ serviceCollection.AddScoped<Foo>();
+ serviceCollection.AddScoped<Foo2>();
+ serviceCollection.AddScoped<IBar, Bar2>();
+ serviceCollection.AddScoped<IBaz, Baz>();
+
+ // Act + Assert
+ serviceCollection.BuildServiceProvider(new ServiceProviderOptions() { ValidateOnBuild = true, ValidateScopes = true });
+ }
+
+ [Fact]
public void BuildServiceProvider_ValidateOnBuild_ThrowsForUnresolvableServices()
{
// Arrange
@@ -327,6 +405,13 @@ namespace Microsoft.Extensions.DependencyInjection.Tests
}
}
+ private class Foo2 : IFoo
+ {
+ public Foo2(IBar bar)
+ {
+ }
+ }
+
private interface IBar
{
}
diff --git a/src/libraries/System.Collections/src/System/Collections/BitArray.cs b/src/libraries/System.Collections/src/System/Collections/BitArray.cs
index 0ac3331b062f..0f54b8a714c7 100644
--- a/src/libraries/System.Collections/src/System/Collections/BitArray.cs
+++ b/src/libraries/System.Collections/src/System/Collections/BitArray.cs
@@ -756,21 +756,19 @@ namespace System.Collections
if (array is int[] intArray)
{
- Div32Rem(m_length, out int extraBits);
-
- if (extraBits == 0)
+ if (array.Length - index < GetInt32ArrayLengthFromBitLength(m_length))
{
- // we have perfect bit alignment, no need to sanitize, just copy
- Array.Copy(m_array, 0, intArray, index, m_array.Length);
+ throw new ArgumentException(SR.Argument_InvalidOffLen);
}
- else
- {
- int last = (m_length - 1) >> BitShiftPerInt32;
- // do not copy the last int, as it is not completely used
- Array.Copy(m_array, 0, intArray, index, last);
+ int quotient = Div32Rem(m_length, out int extraBits);
+
+ Array.Copy(m_array, 0, intArray, index, quotient);
+
+ if (extraBits > 0)
+ {
// the last int needs to be masked
- intArray[index + last] = m_array[last] & unchecked((1 << extraBits) - 1);
+ intArray[index + quotient] = m_array[quotient] & unchecked((1 << extraBits) - 1);
}
}
else if (array is byte[] byteArray)
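The rewritten path validates the destination size up front, copies `quotient` whole ints, and masks the trailing partial int so bits left over from a previously longer `Length` (issue #98813) cannot leak out. A small worked sketch of the index arithmetic, assuming the same 32-bit word size:

```C#
using System;

class BitArrayCopySketch
{
    static void Main()
    {
        // For m_length = 40 bits: one full int is copied, 8 bits remain.
        int quotient = Math.DivRem(40, 32, out int extraBits); // quotient = 1, extraBits = 8
        int mask = (1 << extraBits) - 1;                       // 0x000000FF
        Console.WriteLine($"{quotient} full int(s), mask 0x{mask:X8}");
        // Masking keeps only the live 8 bits, so stale high bits from a
        // previous, longer Length never reach the destination array.
    }
}
```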
diff --git a/src/libraries/System.Collections/src/System/Collections/Generic/PriorityQueue.cs b/src/libraries/System.Collections/src/System/Collections/Generic/PriorityQueue.cs
index edc1327b446c..5047b7643373 100644
--- a/src/libraries/System.Collections/src/System/Collections/Generic/PriorityQueue.cs
+++ b/src/libraries/System.Collections/src/System/Collections/Generic/PriorityQueue.cs
@@ -470,7 +470,7 @@ namespace System.Collections.Generic
if (_size == 0)
{
- // build using Heapify() if the queue is empty.
+                // If the queue is empty, just append the elements since they all have the same priority.
int i = 0;
(TElement, TPriority)[] nodes = _nodes;
@@ -487,11 +487,6 @@ namespace System.Collections.Generic
_size = i;
_version++;
-
- if (i > 1)
- {
- Heapify();
- }
}
else
{
diff --git a/src/libraries/System.Collections/tests/BitArray/BitArray_GetSetTests.cs b/src/libraries/System.Collections/tests/BitArray/BitArray_GetSetTests.cs
index 28f3cb96dca9..912164e4efc5 100644
--- a/src/libraries/System.Collections/tests/BitArray/BitArray_GetSetTests.cs
+++ b/src/libraries/System.Collections/tests/BitArray/BitArray_GetSetTests.cs
@@ -396,6 +396,18 @@ namespace System.Collections.Tests
}
}
+ // https://github.com/dotnet/runtime/issues/98813
+ [Fact]
+ public static void CopyToIntArray_Regression98813()
+ {
+ BitArray bitArray = new BitArray(256);
+ bitArray.Length = 32;
+ int[] expectedOutput = new int[] { 0 };
+ int[] actualOutput = new int[1];
+ bitArray.CopyTo(actualOutput, 0);
+ Assert.Equal(expectedOutput, actualOutput);
+ }
+
// https://github.com/dotnet/runtime/issues/30440
[Fact]
public static void CopyToByteArray_Regression39929()
@@ -452,19 +464,13 @@ namespace System.Collections.Tests
[InlineData(default(int), BitsPerInt32, 1, 1)]
[InlineData(default(int), BitsPerInt32 * 4, 4 - 1, 0)]
[InlineData(default(int), BitsPerInt32 * 4, 4, 1)]
- public static void CopyTo_Size_Invalid<T>(T def, int bits, int arraySize, int index)
+ [InlineData(default(int), BitsPerInt32 + 1, 1, 0)]
+ public static void CopyTo_Size_Invalid<T>(T _, int bits, int arraySize, int index)
{
ICollection bitArray = new BitArray(bits);
T[] array = (T[])Array.CreateInstance(typeof(T), arraySize);
AssertExtensions.Throws<ArgumentOutOfRangeException>("index", () => bitArray.CopyTo(array, -1));
- if (def is int)
- {
- AssertExtensions.Throws<ArgumentException>("destinationArray", string.Empty, () => bitArray.CopyTo(array, index));
- }
- else
- {
- AssertExtensions.Throws<ArgumentException>(null, () => bitArray.CopyTo(array, index));
- }
+ AssertExtensions.Throws<ArgumentException>(null, () => bitArray.CopyTo(array, index));
}
[Fact]
diff --git a/src/libraries/System.Console/src/System/ConsolePal.Unix.cs b/src/libraries/System.Console/src/System/ConsolePal.Unix.cs
index 23d4b9ba595f..77a679150b29 100644
--- a/src/libraries/System.Console/src/System/ConsolePal.Unix.cs
+++ b/src/libraries/System.Console/src/System/ConsolePal.Unix.cs
@@ -355,19 +355,22 @@ namespace System
// Invalidate before reading cached values.
CheckTerminalSettingsInvalidated();
- Interop.Sys.WinSize winsize;
- if (s_windowWidth == -1 &&
- s_terminalHandle != null &&
- Interop.Sys.GetWindowSize(s_terminalHandle, out winsize) == 0)
+ if (s_windowWidth == -1)
{
- s_windowWidth = winsize.Col;
- s_windowHeight = winsize.Row;
- }
- else
- {
- s_windowWidth = TerminalFormatStringsInstance.Columns;
- s_windowHeight = TerminalFormatStringsInstance.Lines;
+ Interop.Sys.WinSize winsize;
+ if (s_terminalHandle != null &&
+ Interop.Sys.GetWindowSize(s_terminalHandle, out winsize) == 0)
+ {
+ s_windowWidth = winsize.Col;
+ s_windowHeight = winsize.Row;
+ }
+ else
+ {
+ s_windowWidth = TerminalFormatStringsInstance.Columns;
+ s_windowHeight = TerminalFormatStringsInstance.Lines;
+ }
}
+
width = s_windowWidth;
height = s_windowHeight;
}
diff --git a/src/libraries/System.Diagnostics.DiagnosticSource/src/DiagnosticSourceUsersGuide.md b/src/libraries/System.Diagnostics.DiagnosticSource/src/DiagnosticSourceUsersGuide.md
index 8f98d156cd1e..bcd4e96cb8aa 100644
--- a/src/libraries/System.Diagnostics.DiagnosticSource/src/DiagnosticSourceUsersGuide.md
+++ b/src/libraries/System.Diagnostics.DiagnosticSource/src/DiagnosticSourceUsersGuide.md
@@ -180,7 +180,7 @@ Thus the event names only need to be unique within a component.
reflection must be used to fetch fields). This is both easier to program and more efficient.
Thus in scenarios where there is likely high-volume filtering to be done by the logging listener, having
this type available to do the cast is valuable. Note that this type needs to be made public (since
- the listener needs to see it), and should be under the namespace System.Diagnostics.DiagnosticSource.PayloadTypes.
+ the listener needs to see it).
Note that if there is doubt about the value DO NOT create an explicit type, as you CAN convert from
an anonymous type to a explicit type compatibly in the future, but once you expose the payload type
you must keep it forever. The payload type should simply have C# 'TYPE NAME {get; set; }' properties
@@ -405,6 +405,21 @@ Thus we could replace the `listener.Subscribe()` call in the previous example wi
This very efficiently subscribes to only the 'RequestStart' events. All other events will cause the `DiagnosticSource.IsEnabled()`
method to return `false`, and thus be efficiently filtered out.
+NOTE: Filtering is only designed as a performance optimization. It is possible for a listener to receive events even when they
+do not satisfy the filter. This could occur because some other listener has subscribed to the event or because the source
+of the event didn't check `IsEnabled()` prior to sending it. If you want to be certain that a given event satisfies the filter,
+you will need to check it inside the callback. For example:
+
+```C#
+Action<KeyValuePair<string, object>> callback = (KeyValuePair<string, object> evnt) =>
+ {
+        if (predicate(evnt.Key)) // only print out events that satisfy our filter
+ {
+ Console.WriteLine("From Listener {0} Received Event {1} with payload {2}", networkListener.Name, evnt.Key, evnt.Value.ToString());
+ }
+ };
+```
+
##### Context-based Filtering
Some scenarios require advanced filtering based on extended context.
Producers may call `DiagnosticSource.IsEnabled()` overloads and supply additional event properties:
diff --git a/src/libraries/System.Diagnostics.DiagnosticSource/src/System/Diagnostics/Metrics/MetricsEventSource.cs b/src/libraries/System.Diagnostics.DiagnosticSource/src/System/Diagnostics/Metrics/MetricsEventSource.cs
index 0b446b23ad35..3fbe68f55454 100644
--- a/src/libraries/System.Diagnostics.DiagnosticSource/src/System/Diagnostics/Metrics/MetricsEventSource.cs
+++ b/src/libraries/System.Diagnostics.DiagnosticSource/src/System/Diagnostics/Metrics/MetricsEventSource.cs
@@ -737,7 +737,11 @@ namespace System.Diagnostics.Metrics
StringBuilder sb = new StringBuilder();
for (int i = 0; i < quantiles.Length; i++)
{
- sb.Append(quantiles[i].Quantile).Append('=').Append(quantiles[i].Value);
+#if NETCOREAPP
+ sb.Append(CultureInfo.InvariantCulture, $"{quantiles[i].Quantile}={quantiles[i].Value}");
+#else
+ sb.AppendFormat(CultureInfo.InvariantCulture, "{0}={1}", quantiles[i].Quantile, quantiles[i].Value);
+#endif
if (i != quantiles.Length - 1)
{
sb.Append(';');
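Formatting through `CultureInfo.InvariantCulture` matters because a culture such as fi-FI uses ',' as its decimal separator, which would collide with the `;`-delimited `quantile=value` payload (this is exactly why the remote-executor test below runs under fi-FI). A standalone sketch of the NETCOREAPP path:

```C#
using System;
using System.Globalization;
using System.Text;

class QuantileFormatSketch
{
    static void Main()
    {
        CultureInfo.CurrentCulture = new CultureInfo("fi-FI"); // ',' decimal separator
        double quantile = 0.5, value = 19.5;
        var sb = new StringBuilder();
        // Interpolated append bound to the invariant culture, not the current one.
        sb.Append(CultureInfo.InvariantCulture, $"{quantile}={value}");
        Console.WriteLine(sb.ToString()); // "0.5=19.5" even under fi-FI
    }
}
```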
diff --git a/src/libraries/System.Diagnostics.DiagnosticSource/tests/MetricEventSourceTests.cs b/src/libraries/System.Diagnostics.DiagnosticSource/tests/MetricEventSourceTests.cs
index 0db3af5edb57..8eec15c60c79 100644
--- a/src/libraries/System.Diagnostics.DiagnosticSource/tests/MetricEventSourceTests.cs
+++ b/src/libraries/System.Diagnostics.DiagnosticSource/tests/MetricEventSourceTests.cs
@@ -1,7 +1,6 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
-using System;
using System.Collections.Generic;
using System.Diagnostics.Tracing;
using System.Globalization;
@@ -9,7 +8,7 @@ using System.Linq;
using System.Runtime.CompilerServices;
using System.Text;
using System.Threading;
-using System.Threading.Tasks;
+using Microsoft.DotNet.RemoteExecutor;
using Xunit;
using Xunit.Abstractions;
@@ -659,45 +658,59 @@ namespace System.Diagnostics.Metrics.Tests
AssertInitialEnumerationCompleteEventPresent(events2);
}
- [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsNotBrowser))]
+ public static bool IsNotBrowserAndRemoteExecuteSupported => PlatformDetection.IsNotBrowser && RemoteExecutor.IsSupported;
+
+ [ConditionalFact(typeof(MetricEventSourceTests), nameof(IsNotBrowserAndRemoteExecuteSupported))]
[OuterLoop("Slow and has lots of console spew")]
public void EventSourcePublishesTimeSeriesWithEmptyMetadata()
{
- using Meter meter = new Meter("TestMeter1", null, new TagList() { { "Mk1", "Mv1" }, { "Mk2", "Mv2" } }, new object());
- Counter<int> c = meter.CreateCounter<int>("counter1");
- int counterState = 3;
- ObservableCounter<int> oc = meter.CreateObservableCounter<int>("observableCounter1", () => { counterState += 7; return counterState; });
- int gaugeState = 0;
- ObservableGauge<int> og = meter.CreateObservableGauge<int>("observableGauge1", () => { gaugeState += 9; return gaugeState; });
- Histogram<int> h = meter.CreateHistogram<int>("histogram1");
- UpDownCounter<int> udc = meter.CreateUpDownCounter<int>("upDownCounter1");
- int upDownCounterState = 0;
- ObservableUpDownCounter<int> oudc = meter.CreateObservableUpDownCounter<int>("observableUpDownCounter1", () => { upDownCounterState -= 11; return upDownCounterState; });
-
- EventWrittenEventArgs[] events;
- using (MetricsEventListener listener = new MetricsEventListener(_output, MetricsEventListener.TimeSeriesValues, IntervalSecs, "TestMeter1"))
+ RemoteExecutor.Invoke(static () =>
{
- listener.WaitForCollectionStop(s_waitForEventTimeout, 1);
- c.Add(5);
- h.Record(19);
- udc.Add(-33);
- listener.WaitForCollectionStop(s_waitForEventTimeout, 2);
- c.Add(12);
- h.Record(26);
- udc.Add(-40);
- listener.WaitForCollectionStop(s_waitForEventTimeout, 3);
- events = listener.Events.ToArray();
- }
+ CultureInfo.DefaultThreadCurrentCulture = new CultureInfo("fi-FI");
- AssertBeginInstrumentReportingEventsPresent(events, c, oc, og, h, udc, oudc);
- AssertInitialEnumerationCompleteEventPresent(events);
- AssertCounterEventsPresent(events, meter.Name, c.Name, "", "", ("5", "5"), ("12", "17"));
- AssertCounterEventsPresent(events, meter.Name, oc.Name, "", "", ("", "10"), ("7", "17"));
- AssertGaugeEventsPresent(events, meter.Name, og.Name, "", "", "9", "18");
- AssertHistogramEventsPresent(events, meter.Name, h.Name, "", "", ("0.5=19;0.95=19;0.99=19", "1", "19"), ("0.5=26;0.95=26;0.99=26", "1", "26"));
- AssertUpDownCounterEventsPresent(events, meter.Name, udc.Name, "", "", ("-33", "-33"), ("-40", "-73"));
- AssertUpDownCounterEventsPresent(events, meter.Name, oudc.Name, "", "", ("", "-11"), ("-11", "-22"));
- AssertCollectStartStopEventsPresent(events, IntervalSecs, 3);
+ using Meter meter = new Meter("TestMeter1", null, new TagList() { { "Mk1", "Mv1" }, { "Mk2", "Mv2" } }, new object());
+ Counter<int> c = meter.CreateCounter<int>("counter1");
+ int counterState = 3;
+ ObservableCounter<int> oc = meter.CreateObservableCounter<int>("observableCounter1", () => { counterState += 7; return counterState; });
+ int gaugeState = 0;
+ ObservableGauge<int> og = meter.CreateObservableGauge<int>("observableGauge1", () => { gaugeState += 9; return gaugeState; });
+ Histogram<int> h = meter.CreateHistogram<int>("histogram1");
+ UpDownCounter<int> udc = meter.CreateUpDownCounter<int>("upDownCounter1");
+ int upDownCounterState = 0;
+ ObservableUpDownCounter<int> oudc = meter.CreateObservableUpDownCounter<int>("observableUpDownCounter1", () => { upDownCounterState -= 11; return upDownCounterState; });
+
+ EventWrittenEventArgs[] events;
+ using (MetricsEventListener listener = new MetricsEventListener(NullTestOutputHelper.Instance, MetricsEventListener.TimeSeriesValues, IntervalSecs, "TestMeter1"))
+ {
+ listener.WaitForCollectionStop(s_waitForEventTimeout, 1);
+ c.Add(5);
+ h.Record(19);
+ udc.Add(-33);
+ listener.WaitForCollectionStop(s_waitForEventTimeout, 2);
+ c.Add(12);
+ h.Record(26);
+ udc.Add(-40);
+ listener.WaitForCollectionStop(s_waitForEventTimeout, 3);
+ events = listener.Events.ToArray();
+ }
+
+ AssertBeginInstrumentReportingEventsPresent(events, c, oc, og, h, udc, oudc);
+ AssertInitialEnumerationCompleteEventPresent(events);
+ AssertCounterEventsPresent(events, meter.Name, c.Name, "", "", ("5", "5"), ("12", "17"));
+ AssertCounterEventsPresent(events, meter.Name, oc.Name, "", "", ("", "10"), ("7", "17"));
+ AssertGaugeEventsPresent(events, meter.Name, og.Name, "", "", "9", "18");
+ AssertHistogramEventsPresent(events, meter.Name, h.Name, "", "", ("0.5=19;0.95=19;0.99=19", "1", "19"), ("0.5=26;0.95=26;0.99=26", "1", "26"));
+ AssertUpDownCounterEventsPresent(events, meter.Name, udc.Name, "", "", ("-33", "-33"), ("-40", "-73"));
+ AssertUpDownCounterEventsPresent(events, meter.Name, oudc.Name, "", "", ("", "-11"), ("-11", "-22"));
+ AssertCollectStartStopEventsPresent(events, IntervalSecs, 3);
+ }).Dispose();
+ }
+
+ private sealed class NullTestOutputHelper : ITestOutputHelper
+ {
+ public static NullTestOutputHelper Instance { get; } = new();
+ public void WriteLine(string message) { }
+ public void WriteLine(string format, params object[] args) { }
}
[ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsNotBrowser))]
@@ -1471,7 +1484,7 @@ namespace System.Diagnostics.Metrics.Tests
return sb.ToString();
}
- private void AssertBeginInstrumentReportingEventsPresent(EventWrittenEventArgs[] events, params Instrument[] expectedInstruments)
+ private static void AssertBeginInstrumentReportingEventsPresent(EventWrittenEventArgs[] events, params Instrument[] expectedInstruments)
{
var beginReportEvents = events.Where(e => e.EventName == "BeginInstrumentReporting").Select(e =>
new
@@ -1503,7 +1516,7 @@ namespace System.Diagnostics.Metrics.Tests
Assert.Equal(expectedInstruments.Length, beginReportEvents.Length);
}
- private void AssertEndInstrumentReportingEventsPresent(EventWrittenEventArgs[] events, params Instrument[] expectedInstruments)
+ private static void AssertEndInstrumentReportingEventsPresent(EventWrittenEventArgs[] events, params Instrument[] expectedInstruments)
{
var beginReportEvents = events.Where(e => e.EventName == "EndInstrumentReporting").Select(e =>
new
@@ -1535,27 +1548,27 @@ namespace System.Diagnostics.Metrics.Tests
Assert.Equal(expectedInstruments.Length, beginReportEvents.Length);
}
- private void AssertInitialEnumerationCompleteEventPresent(EventWrittenEventArgs[] events, int eventsCount = 1)
+ private static void AssertInitialEnumerationCompleteEventPresent(EventWrittenEventArgs[] events, int eventsCount = 1)
{
Assert.Equal(eventsCount, events.Where(e => e.EventName == "InitialInstrumentEnumerationComplete").Count());
}
- private void AssertTimeSeriesLimitPresent(EventWrittenEventArgs[] events)
+ private static void AssertTimeSeriesLimitPresent(EventWrittenEventArgs[] events)
{
Assert.Equal(1, events.Where(e => e.EventName == "TimeSeriesLimitReached").Count());
}
- private void AssertTimeSeriesLimitNotPresent(EventWrittenEventArgs[] events)
+ private static void AssertTimeSeriesLimitNotPresent(EventWrittenEventArgs[] events)
{
Assert.Equal(0, events.Where(e => e.EventName == "TimeSeriesLimitReached").Count());
}
- private void AssertHistogramLimitPresent(EventWrittenEventArgs[] events)
+ private static void AssertHistogramLimitPresent(EventWrittenEventArgs[] events)
{
Assert.Equal(1, events.Where(e => e.EventName == "HistogramLimitReached").Count());
}
- private void AssertInstrumentPublishingEventsPresent(EventWrittenEventArgs[] events, params Instrument[] expectedInstruments)
+ private static void AssertInstrumentPublishingEventsPresent(EventWrittenEventArgs[] events, params Instrument[] expectedInstruments)
{
var publishEvents = events.Where(e => e.EventName == "InstrumentPublished").Select(e =>
new
@@ -1587,19 +1600,19 @@ namespace System.Diagnostics.Metrics.Tests
Assert.Equal(expectedInstruments.Length, publishEvents.Length);
}
- private void AssertCounterEventsPresent(EventWrittenEventArgs[] events, string meterName, string instrumentName, string tags,
+ private static void AssertCounterEventsPresent(EventWrittenEventArgs[] events, string meterName, string instrumentName, string tags,
string expectedUnit, params (string, string)[] expected)
{
AssertGenericCounterEventsPresent("CounterRateValuePublished", events, meterName, instrumentName, tags, expectedUnit, expected);
}
- private void AssertUpDownCounterEventsPresent(EventWrittenEventArgs[] events, string meterName, string instrumentName, string tags,
+ private static void AssertUpDownCounterEventsPresent(EventWrittenEventArgs[] events, string meterName, string instrumentName, string tags,
string expectedUnit, params (string, string)[] expected)
{
AssertGenericCounterEventsPresent("UpDownCounterRateValuePublished", events, meterName, instrumentName, tags, expectedUnit, expected);
}
- private void AssertGenericCounterEventsPresent(string eventName, EventWrittenEventArgs[] events, string meterName, string instrumentName, string tags,
+ private static void AssertGenericCounterEventsPresent(string eventName, EventWrittenEventArgs[] events, string meterName, string instrumentName, string tags,
string expectedUnit, params (string, string)[] expected)
{
var counterEvents = events.Where(e => e.EventName == eventName).Select(e =>
@@ -1623,7 +1636,7 @@ namespace System.Diagnostics.Metrics.Tests
}
}
- private void AssertCounterEventsNotPresent(EventWrittenEventArgs[] events, string meterName, string instrumentName, string tags)
+ private static void AssertCounterEventsNotPresent(EventWrittenEventArgs[] events, string meterName, string instrumentName, string tags)
{
var counterEvents = events.Where(e => e.EventName == "CounterRateValuePublished").Select(e =>
new
@@ -1637,7 +1650,7 @@ namespace System.Diagnostics.Metrics.Tests
Assert.Equal(0, filteredEvents.Length);
}
- private void AssertGaugeEventsPresent(EventWrittenEventArgs[] events, string meterName, string instrumentName, string tags,
+ private static void AssertGaugeEventsPresent(EventWrittenEventArgs[] events, string meterName, string instrumentName, string tags,
string expectedUnit, params string[] expectedValues)
{
var counterEvents = events.Where(e => e.EventName == "GaugeValuePublished").Select(e =>
@@ -1659,7 +1672,7 @@ namespace System.Diagnostics.Metrics.Tests
}
}
- private void AssertHistogramEventsPresent(EventWrittenEventArgs[] events, string meterName, string instrumentName, string tags,
+ private static void AssertHistogramEventsPresent(EventWrittenEventArgs[] events, string meterName, string instrumentName, string tags,
string expectedUnit, params (string, string, string)[] expected)
{
var counterEvents = events.Where(e => e.EventName == "HistogramValuePublished").Select(e =>
@@ -1685,7 +1698,7 @@ namespace System.Diagnostics.Metrics.Tests
}
}
- private void AssertHistogramEventsNotPresent(EventWrittenEventArgs[] events, string meterName, string instrumentName, string tags)
+ private static void AssertHistogramEventsNotPresent(EventWrittenEventArgs[] events, string meterName, string instrumentName, string tags)
{
var counterEvents = events.Where(e => e.EventName == "HistogramValuePublished").Select(e =>
new
@@ -1698,7 +1711,7 @@ namespace System.Diagnostics.Metrics.Tests
var filteredEvents = counterEvents.Where(e => e.MeterName == meterName && e.InstrumentName == instrumentName && e.Tags == tags).ToArray();
Assert.Equal(0, filteredEvents.Length);
}
- private void AssertCollectStartStopEventsPresent(EventWrittenEventArgs[] events, double expectedIntervalSecs, int expectedPairs)
+ private static void AssertCollectStartStopEventsPresent(EventWrittenEventArgs[] events, double expectedIntervalSecs, int expectedPairs)
{
int startEventsSeen = 0;
int stopEventsSeen = 0;
@@ -1727,7 +1740,7 @@ namespace System.Diagnostics.Metrics.Tests
Assert.Equal(expectedPairs, stopEventsSeen);
}
- private void AssertObservableCallbackErrorPresent(EventWrittenEventArgs[] events)
+ private static void AssertObservableCallbackErrorPresent(EventWrittenEventArgs[] events)
{
var errorEvents = events.Where(e => e.EventName == "ObservableInstrumentCallbackError").Select(e =>
new
@@ -1738,7 +1751,7 @@ namespace System.Diagnostics.Metrics.Tests
Assert.Contains("Example user exception", errorEvents[0].ErrorText);
}
- private void AssertMultipleSessionsConfiguredIncorrectlyErrorEventsPresent(EventWrittenEventArgs[] events,
+ private static void AssertMultipleSessionsConfiguredIncorrectlyErrorEventsPresent(EventWrittenEventArgs[] events,
string expectedMaxHistograms, string actualMaxHistograms, string expectedMaxTimeSeries, string actualMaxTimeSeries,
string expectedRefreshInterval, string actualRefreshInterval)
{
diff --git a/src/libraries/System.IO.FileSystem.Watcher/src/System/IO/FileSystemWatcher.Linux.cs b/src/libraries/System.IO.FileSystem.Watcher/src/System/IO/FileSystemWatcher.Linux.cs
index 65dcefea25da..b09e0197e044 100644
--- a/src/libraries/System.IO.FileSystem.Watcher/src/System/IO/FileSystemWatcher.Linux.cs
+++ b/src/libraries/System.IO.FileSystem.Watcher/src/System/IO/FileSystemWatcher.Linux.cs
@@ -720,9 +720,9 @@ namespace System.IO
break;
case Interop.Sys.NotifyEvents.IN_MOVED_TO:
- if (previousEventName != null)
+ if (!previousEventName.IsEmpty)
{
- // If the previous name from IN_MOVED_FROM is non-null, then this is a rename.
+ // If the previous name from IN_MOVED_FROM is non-empty, then this is a rename.
watcher.NotifyRenameEventArgs(WatcherChangeTypes.Renamed, expandedName, previousEventName);
}
else
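
The null check here becomes an IsEmpty check because previousEventName is now a ReadOnlySpan<char> rather than a string; a span is a value type and can never be null, so "no pending IN_MOVED_FROM name" is represented by an empty span. A minimal sketch of the idiom (the variable names are illustrative, not the watcher's actual state):

    using System;

    ReadOnlySpan<char> previousName = default;      // no IN_MOVED_FROM seen yet
    Console.WriteLine(previousName.IsEmpty);        // True: a default span is empty
    previousName = "old-name.txt".AsSpan();
    if (!previousName.IsEmpty)
    {
        // Pair IN_MOVED_FROM/IN_MOVED_TO into a single rename notification.
        Console.WriteLine($"rename from {previousName.ToString()}");
    }
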
diff --git a/src/libraries/System.IO.Packaging/src/System/IO/Packaging/ContentType.cs b/src/libraries/System.IO.Packaging/src/System/IO/Packaging/ContentType.cs
index c6fe72e7f047..894fd4826aa8 100644
--- a/src/libraries/System.IO.Packaging/src/System/IO/Packaging/ContentType.cs
+++ b/src/libraries/System.IO.Packaging/src/System/IO/Packaging/ContentType.cs
@@ -351,8 +351,6 @@ namespace System.IO.Packaging
/// <returns></returns>
private static int GetLengthOfParameterValue(ReadOnlySpan<char> s, int startIndex)
{
- Debug.Assert(s != null);
-
int length;
//if the parameter value does not start with a '"' then,
diff --git a/src/libraries/System.Linq.Expressions/src/System/Linq/Expressions/Interpreter/InstructionList.cs b/src/libraries/System.Linq.Expressions/src/System/Linq/Expressions/Interpreter/InstructionList.cs
index c9d525a81a08..148e24f812e0 100644
--- a/src/libraries/System.Linq.Expressions/src/System/Linq/Expressions/Interpreter/InstructionList.cs
+++ b/src/libraries/System.Linq.Expressions/src/System/Linq/Expressions/Interpreter/InstructionList.cs
@@ -44,7 +44,6 @@ namespace System.Linq.Expressions.Interpreter
public DebugView(InstructionArray array)
{
- ArgumentNullException.ThrowIfNull(array);
_array = array;
}
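
Dropping the ArgumentNullException.ThrowIfNull(array) call is consistent with InstructionArray being a struct: a non-nullable value type always has a value, so the guard could never throw (it would only box the argument). A small sketch of the general point, using a stand-in struct rather than the real InstructionArray:

    struct Payload { public int Count; }

    sealed class DebugViewSketch
    {
        private readonly Payload _array;

        // No null guard needed: a struct parameter cannot be null.
        public DebugViewSketch(Payload array) => _array = array;

        public int Count => _array.Count;
    }
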
diff --git a/src/libraries/System.Linq/System.Linq.sln b/src/libraries/System.Linq/System.Linq.sln
index a81e387e856b..b5283c72e434 100644
--- a/src/libraries/System.Linq/System.Linq.sln
+++ b/src/libraries/System.Linq/System.Linq.sln
@@ -1,4 +1,8 @@
-Microsoft Visual Studio Solution File, Format Version 12.00
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio Version 17
+VisualStudioVersion = 17.10.34618.27
+MinimumVisualStudioVersion = 10.0.40219.1
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TestUtilities", "..\Common\tests\TestUtilities\TestUtilities.csproj", "{AF1B1B01-A4EC-45F4-AE51-CC1FA7892181}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "System.Collections", "..\System.Collections\ref\System.Collections.csproj", "{3A8560D8-0E79-4BDE-802A-C96C7FE98258}"
@@ -35,11 +39,11 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{8CA90AB2-58B
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "gen", "gen", "{84E98F7C-FA2B-4048-AB7C-9FCDEA9CD37E}"
EndProject
-Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "gen", "tools\gen", "{34793393-0347-438D-A832-2476F33C1BE3}"
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "gen", "gen", "{34793393-0347-438D-A832-2476F33C1BE3}"
EndProject
-Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "tools\src", "{F8F69023-9ACD-4979-A710-39D16377AEEE}"
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{F8F69023-9ACD-4979-A710-39D16377AEEE}"
EndProject
-Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "ref", "tools\ref", "{18C4E23D-AB0F-45E5-A6A1-A741F6462E85}"
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "ref", "ref", "{18C4E23D-AB0F-45E5-A6A1-A741F6462E85}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "tools", "tools", "{0ADC596A-5B2E-4E5F-B5B5-DEB65A6C7E9D}"
EndProject
@@ -111,24 +115,28 @@ Global
EndGlobalSection
GlobalSection(NestedProjects) = preSolution
{AF1B1B01-A4EC-45F4-AE51-CC1FA7892181} = {E291F4BF-7B8B-45AD-88F5-FB8B8380C126}
- {80A4051B-4A36-4A8B-BA43-A5AB8AA959F3} = {E291F4BF-7B8B-45AD-88F5-FB8B8380C126}
{3A8560D8-0E79-4BDE-802A-C96C7FE98258} = {7C5B49B9-F7D9-41FB-A8FA-94328BDDCCD1}
{7E4C1F09-B4F2-470E-9E7B-2C386E93D657} = {7C5B49B9-F7D9-41FB-A8FA-94328BDDCCD1}
- {D3160C37-FC48-4907-8F4A-F584ED12B275} = {7C5B49B9-F7D9-41FB-A8FA-94328BDDCCD1}
{14B966BB-CE23-4432-ADBB-89974389AC1D} = {8CA90AB2-58B9-45E7-A684-EDB60C6924B0}
+ {80A4051B-4A36-4A8B-BA43-A5AB8AA959F3} = {E291F4BF-7B8B-45AD-88F5-FB8B8380C126}
{9A13A12F-C924-43AF-94AF-6F1B33582D27} = {84E98F7C-FA2B-4048-AB7C-9FCDEA9CD37E}
{4BEC631E-B5FD-453F-82A0-C95C461798EA} = {84E98F7C-FA2B-4048-AB7C-9FCDEA9CD37E}
{C8F0459C-15D5-4624-8CE4-E93ADF96A28C} = {84E98F7C-FA2B-4048-AB7C-9FCDEA9CD37E}
+ {D3160C37-FC48-4907-8F4A-F584ED12B275} = {7C5B49B9-F7D9-41FB-A8FA-94328BDDCCD1}
{E0CA3ED5-EE6C-4F7C-BCE7-EFB1D64A9CD1} = {34793393-0347-438D-A832-2476F33C1BE3}
{3EFB74E7-616A-48C1-B43B-3F89AA5013E6} = {34793393-0347-438D-A832-2476F33C1BE3}
- {34793393-0347-438D-A832-2476F33C1BE3} = {0ADC596A-5B2E-4E5F-B5B5-DEB65A6C7E9D}
{28ABC524-ACEE-4183-A64A-49E3DC830595} = {F8F69023-9ACD-4979-A710-39D16377AEEE}
{721DB3D9-8221-424E-BE29-084CDD20D26E} = {F8F69023-9ACD-4979-A710-39D16377AEEE}
- {F8F69023-9ACD-4979-A710-39D16377AEEE} = {0ADC596A-5B2E-4E5F-B5B5-DEB65A6C7E9D}
{E19B8772-2DBD-4274-8190-F3CC0242A1C0} = {18C4E23D-AB0F-45E5-A6A1-A741F6462E85}
+ {34793393-0347-438D-A832-2476F33C1BE3} = {0ADC596A-5B2E-4E5F-B5B5-DEB65A6C7E9D}
+ {F8F69023-9ACD-4979-A710-39D16377AEEE} = {0ADC596A-5B2E-4E5F-B5B5-DEB65A6C7E9D}
{18C4E23D-AB0F-45E5-A6A1-A741F6462E85} = {0ADC596A-5B2E-4E5F-B5B5-DEB65A6C7E9D}
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {A4970D79-BF1C-4343-9070-B409DBB69F93}
EndGlobalSection
+ GlobalSection(SharedMSBuildProjectFiles) = preSolution
+ ..\..\tools\illink\src\ILLink.Shared\ILLink.Shared.projitems*{3efb74e7-616a-48c1-b43b-3f89aa5013e6}*SharedItemsImports = 5
+ ..\..\tools\illink\src\ILLink.Shared\ILLink.Shared.projitems*{721db3d9-8221-424e-be29-084cdd20d26e}*SharedItemsImports = 5
+ EndGlobalSection
EndGlobal
diff --git a/src/libraries/System.Linq/src/CompatibilitySuppressions.xml b/src/libraries/System.Linq/src/CompatibilitySuppressions.xml
deleted file mode 100644
index 0f5e8063636b..000000000000
--- a/src/libraries/System.Linq/src/CompatibilitySuppressions.xml
+++ /dev/null
@@ -1,8 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Suppressions xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
- <!-- Exposed publicly in the implementation to enable reflection for data-binding scenarios. -->
- <Suppression>
- <DiagnosticId>CP0001</DiagnosticId>
- <Target>T:System.Linq.Grouping`2</Target>
- </Suppression>
-</Suppressions> \ No newline at end of file
diff --git a/src/libraries/System.Linq/src/System.Linq.csproj b/src/libraries/System.Linq/src/System.Linq.csproj
index 2fc4153be689..7c656e5b50d0 100644
--- a/src/libraries/System.Linq/src/System.Linq.csproj
+++ b/src/libraries/System.Linq/src/System.Linq.csproj
@@ -9,6 +9,7 @@
<PropertyGroup>
<TargetPlatformIdentifier>$([MSBuild]::GetTargetPlatformIdentifier('$(TargetFramework)'))</TargetPlatformIdentifier>
<OptimizeForSize Condition="'$(TargetPlatformIdentifier)' == 'browser' or '$(TargetPlatformIdentifier)' == 'android' or '$(TargetPlatformIdentifier)' == 'ios' or '$(TargetPlatformIdentifier)' == 'tvos'">true</OptimizeForSize>
+ <DefineConstants Condition="'$(OptimizeForSize)' == 'true'">$(DefineConstants);OPTIMIZE_FOR_SIZE</DefineConstants>
</PropertyGroup>
<ItemGroup Condition="'$(OptimizeForSize)' == true">
@@ -18,19 +19,21 @@
<ItemGroup Condition="'$(OptimizeForSize)' != true">
<Compile Include="System\Linq\AppendPrepend.SpeedOpt.cs" />
+ <Compile Include="System\Linq\Cast.SpeedOpt.cs" />
<Compile Include="System\Linq\Concat.SpeedOpt.cs" />
<Compile Include="System\Linq\DefaultIfEmpty.SpeedOpt.cs" />
<Compile Include="System\Linq\Distinct.SpeedOpt.cs" />
<Compile Include="System\Linq\Grouping.SpeedOpt.cs" />
+ <Compile Include="System\Linq\Iterator.SpeedOpt.cs" />
<Compile Include="System\Linq\Lookup.SpeedOpt.cs" />
<Compile Include="System\Linq\OrderedEnumerable.SpeedOpt.cs" />
- <Compile Include="System\Linq\Partition.SpeedOpt.cs" />
<Compile Include="System\Linq\Range.SpeedOpt.cs" />
<Compile Include="System\Linq\Repeat.SpeedOpt.cs" />
<Compile Include="System\Linq\Reverse.SpeedOpt.cs" />
<Compile Include="System\Linq\Select.SpeedOpt.cs" />
<Compile Include="System\Linq\SelectMany.SpeedOpt.cs" />
<Compile Include="System\Linq\Skip.SpeedOpt.cs" />
+ <Compile Include="System\Linq\SkipTake.SpeedOpt.cs" />
<Compile Include="System\Linq\Take.SpeedOpt.cs" />
<Compile Include="System\Linq\Union.SpeedOpt.cs" />
<Compile Include="System\Linq\Where.SpeedOpt.cs" />
@@ -60,8 +63,6 @@
<Compile Include="System\Linq\Index.cs" />
<Compile Include="System\Linq\Intersect.cs" />
<Compile Include="System\Linq\Iterator.cs" />
- <Compile Include="System\Linq\IIListProvider.cs" />
- <Compile Include="System\Linq\IPartition.cs" />
<Compile Include="System\Linq\Join.cs" />
<Compile Include="System\Linq\Last.cs" />
<Compile Include="System\Linq\Lookup.cs" />
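
The new DefineConstants line feeds the MSBuild OptimizeForSize switch into the C# sources as an OPTIMIZE_FOR_SIZE symbol, which the #if guards in the files below consume. A toy sketch of how such a symbol is typically consumed (the method here is illustrative, not the library's actual code):

    using System.Collections.Generic;

    static class SizeOptSketch
    {
        public static int CountItems<T>(IEnumerable<T> source)
        {
    #if !OPTIMIZE_FOR_SIZE
            // Fast path compiled in only for speed-optimized targets.
            if (source is ICollection<T> collection)
                return collection.Count;
    #endif
            int count = 0;
            foreach (T _ in source) count++;
            return count;
        }
    }
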
diff --git a/src/libraries/System.Linq/src/System/Linq/AppendPrepend.SpeedOpt.cs b/src/libraries/System.Linq/src/System/Linq/AppendPrepend.SpeedOpt.cs
index 80ee23998603..cfd6ea795274 100644
--- a/src/libraries/System.Linq/src/System/Linq/AppendPrepend.SpeedOpt.cs
+++ b/src/libraries/System.Linq/src/System/Linq/AppendPrepend.SpeedOpt.cs
@@ -8,15 +8,6 @@ namespace System.Linq
{
public static partial class Enumerable
{
- private abstract partial class AppendPrependIterator<TSource> : IIListProvider<TSource>
- {
- public abstract TSource[] ToArray();
-
- public abstract List<TSource> ToList();
-
- public abstract int GetCount(bool onlyIfCheap);
- }
-
private sealed partial class AppendPrepend1Iterator<TSource>
{
private TSource[] LazyToArray()
@@ -130,14 +121,63 @@ namespace System.Linq
public override int GetCount(bool onlyIfCheap)
{
- if (_source is IIListProvider<TSource> listProv)
+ if (_source is Iterator<TSource> iterator)
{
- int count = listProv.GetCount(onlyIfCheap);
+ int count = iterator.GetCount(onlyIfCheap);
return count == -1 ? -1 : count + 1;
}
return !onlyIfCheap || _source is ICollection<TSource> ? _source.Count() + 1 : -1;
}
+
+ public override TSource? TryGetFirst(out bool found)
+ {
+ if (_appending)
+ {
+ TSource? first = _source.TryGetFirst(out found);
+ if (found)
+ {
+ return first;
+ }
+ }
+
+ found = true;
+ return _item;
+ }
+
+ public override TSource? TryGetLast(out bool found)
+ {
+ if (!_appending)
+ {
+ TSource? last = _source.TryGetLast(out found);
+ if (found)
+ {
+ return last;
+ }
+ }
+
+ found = true;
+ return _item;
+ }
+
+ public override TSource? TryGetElementAt(int index, out bool found)
+ {
+ if (!_appending)
+ {
+ if (index == 0)
+ {
+ found = true;
+ return _item;
+ }
+
+ index--;
+ return
+ _source is Iterator<TSource> iterator ? iterator.TryGetElementAt(index, out found) :
+ TryGetElementAtNonIterator(_source, index, out found);
+ }
+
+ return base.TryGetElementAt(index, out found);
+ }
}
private sealed partial class AppendPrependN<TSource>
@@ -232,9 +272,9 @@ namespace System.Linq
public override int GetCount(bool onlyIfCheap)
{
- if (_source is IIListProvider<TSource> listProv)
+ if (_source is Iterator<TSource> iterator)
{
- int count = listProv.GetCount(onlyIfCheap);
+ int count = iterator.GetCount(onlyIfCheap);
return count == -1 ? -1 : count + _appendCount + _prependCount;
}
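
With the TryGetFirst/TryGetLast/TryGetElementAt overrides above, First, Last, and ElementAt on an appended or prepended sequence can answer from the stored item or delegate to the source instead of enumerating the whole chain. A usage sketch of the observable effect:

    using System;
    using System.Linq;

    class Demo
    {
        static void Main()
        {
            int[] source = { 1, 2, 3 };
            Console.WriteLine(source.Append(42).Last());       // 42: the appended item, no scan
            Console.WriteLine(source.Prepend(0).First());      // 0: the prepended item
            Console.WriteLine(source.Prepend(0).ElementAt(0)); // 0: index 0 is the prepended item
        }
    }
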
diff --git a/src/libraries/System.Linq/src/System/Linq/Cast.SpeedOpt.cs b/src/libraries/System.Linq/src/System/Linq/Cast.SpeedOpt.cs
new file mode 100644
index 000000000000..3cded1625e8d
--- /dev/null
+++ b/src/libraries/System.Linq/src/System/Linq/Cast.SpeedOpt.cs
@@ -0,0 +1,115 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Collections;
+using System.Collections.Generic;
+
+namespace System.Linq
+{
+ public static partial class Enumerable
+ {
+ private sealed partial class CastICollectionIterator<TResult>
+ {
+ public override int GetCount(bool onlyIfCheap) => _source.Count;
+
+ public override TResult[] ToArray()
+ {
+ TResult[] array = new TResult[_source.Count];
+
+ int index = 0;
+ foreach (TResult item in _source)
+ {
+ array[index++] = item;
+ }
+
+ return array;
+ }
+
+ public override List<TResult> ToList()
+ {
+ List<TResult> list = new(_source.Count);
+
+ foreach (TResult item in _source)
+ {
+ list.Add(item);
+ }
+
+ return list;
+ }
+
+ public override TResult? TryGetElementAt(int index, out bool found)
+ {
+ if (index >= 0)
+ {
+ IEnumerator e = _source.GetEnumerator();
+ try
+ {
+ while (e.MoveNext())
+ {
+ if (index == 0)
+ {
+ found = true;
+ return (TResult)e.Current;
+ }
+
+ index--;
+ }
+ }
+ finally
+ {
+ (e as IDisposable)?.Dispose();
+ }
+ }
+
+ found = false;
+ return default;
+ }
+
+ public override TResult? TryGetFirst(out bool found)
+ {
+ IEnumerator e = _source.GetEnumerator();
+ try
+ {
+ if (e.MoveNext())
+ {
+ found = true;
+ return (TResult)e.Current;
+ }
+ }
+ finally
+ {
+ (e as IDisposable)?.Dispose();
+ }
+
+ found = false;
+ return default;
+ }
+
+ public override TResult? TryGetLast(out bool found)
+ {
+ IEnumerator e = _source.GetEnumerator();
+ try
+ {
+ if (e.MoveNext())
+ {
+ TResult last = (TResult)e.Current;
+ while (e.MoveNext())
+ {
+ last = (TResult)e.Current;
+ }
+
+ found = true;
+ return last;
+ }
+
+ found = false;
+ return default;
+ }
+ finally
+ {
+ (e as IDisposable)?.Dispose();
+ }
+ }
+ }
+ }
+}
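
Cast.SpeedOpt.cs gives the new CastICollectionIterator optimized ToArray/ToList/GetCount and element-access paths, so casting a non-generic ICollection no longer pays for a counting pass. A usage sketch:

    using System;
    using System.Collections;
    using System.Linq;

    class Demo
    {
        static void Main()
        {
            ArrayList values = new ArrayList { 1, 2, 3 };
            var ints = values.Cast<int>();
            Console.WriteLine(ints.Count());   // 3, answered from ICollection.Count
            int[] array = ints.ToArray();      // array sized up front from Count
            Console.WriteLine(array.Length);   // 3
        }
    }
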
diff --git a/src/libraries/System.Linq/src/System/Linq/Cast.cs b/src/libraries/System.Linq/src/System/Linq/Cast.cs
index 0c20609b3eb7..7af5ad058eb0 100644
--- a/src/libraries/System.Linq/src/System/Linq/Cast.cs
+++ b/src/libraries/System.Linq/src/System/Linq/Cast.cs
@@ -3,6 +3,7 @@
using System.Collections;
using System.Collections.Generic;
+using System.Diagnostics;
namespace System.Linq
{
@@ -45,6 +46,11 @@ namespace System.Linq
ThrowHelper.ThrowArgumentNullException(ExceptionArgument.source);
}
+ if (source is ICollection collection)
+ {
+ return new CastICollectionIterator<TResult>(collection);
+ }
+
return CastIterator<TResult>(source);
}
@@ -55,5 +61,46 @@ namespace System.Linq
yield return (TResult)obj;
}
}
+
+ [DebuggerDisplay("Count = {Count}")]
+ private sealed partial class CastICollectionIterator<TResult>(ICollection source) : Iterator<TResult>
+ {
+ private readonly ICollection _source = source;
+ private IEnumerator? _enumerator;
+
+ public override Iterator<TResult> Clone() => new CastICollectionIterator<TResult>(_source);
+
+ public override bool MoveNext()
+ {
+ switch (_state)
+ {
+ case 1:
+ _enumerator = _source.GetEnumerator();
+ _state = 2;
+ goto case 2;
+
+ case 2:
+ Debug.Assert(_enumerator != null);
+ if (_enumerator.MoveNext())
+ {
+ _current = (TResult)_enumerator.Current;
+ return true;
+ }
+
+ Dispose();
+ break;
+ }
+
+ return false;
+ }
+
+ public override void Dispose()
+ {
+ (_enumerator as IDisposable)?.Dispose();
+ _enumerator = null;
+
+ base.Dispose();
+ }
+ }
}
}
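
CastICollectionIterator follows the library's Iterator state-machine convention: _state 1 means the enumerator has not been acquired yet, state 2 means enumeration is in progress, and exhaustion disposes the iterator. A stripped-down, illustrative iterator in the same shape (not the library's actual type):

    sealed class CountdownIterator
    {
        private int _state = 1;   // 1 = not started, 2 = enumerating
        private int _remaining;

        public int Current { get; private set; }

        public CountdownIterator(int from) => _remaining = from;

        public bool MoveNext()
        {
            switch (_state)
            {
                case 1:
                    _state = 2;   // acquire resources here, then fall through
                    goto case 2;
                case 2:
                    if (_remaining > 0)
                    {
                        Current = _remaining--;
                        return true;
                    }
                    _state = 0;   // done: release resources
                    break;
            }
            return false;
        }
    }
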
diff --git a/src/libraries/System.Linq/src/System/Linq/Concat.SpeedOpt.cs b/src/libraries/System.Linq/src/System/Linq/Concat.SpeedOpt.cs
index a5ad64f78584..6ee051c00c27 100644
--- a/src/libraries/System.Linq/src/System/Linq/Concat.SpeedOpt.cs
+++ b/src/libraries/System.Linq/src/System/Linq/Concat.SpeedOpt.cs
@@ -342,13 +342,9 @@ namespace System.Linq
}
}
- private abstract partial class ConcatIterator<TSource> : IPartition<TSource>
+ private abstract partial class ConcatIterator<TSource>
{
- public abstract int GetCount(bool onlyIfCheap);
-
- public abstract TSource[] ToArray();
-
- public List<TSource> ToList()
+ public override List<TSource> ToList()
{
int count = GetCount(onlyIfCheap: true);
var list = count != -1 ? new List<TSource>(count) : new List<TSource>();
@@ -367,16 +363,6 @@ namespace System.Linq
return list;
}
- public abstract TSource? TryGetElementAt(int index, out bool found);
-
- public abstract TSource? TryGetFirst(out bool found);
-
- public abstract TSource? TryGetLast(out bool found);
-
- public IPartition<TSource>? Skip(int count) => new EnumerablePartition<TSource>(this, count, -1);
-
- public IPartition<TSource>? Take(int count) => new EnumerablePartition<TSource>(this, 0, count - 1);
-
}
}
}
diff --git a/src/libraries/System.Linq/src/System/Linq/Count.cs b/src/libraries/System.Linq/src/System/Linq/Count.cs
index 14f3d457f6ea..048ebc0891e7 100644
--- a/src/libraries/System.Linq/src/System/Linq/Count.cs
+++ b/src/libraries/System.Linq/src/System/Linq/Count.cs
@@ -20,10 +20,12 @@ namespace System.Linq
return collectionoft.Count;
}
- if (source is IIListProvider<TSource> listProv)
+#if !OPTIMIZE_FOR_SIZE
+ if (source is Iterator<TSource> iterator)
{
- return listProv.GetCount(onlyIfCheap: false);
+ return iterator.GetCount(onlyIfCheap: false);
}
+#endif
if (source is ICollection collection)
{
@@ -105,15 +107,17 @@ namespace System.Linq
return true;
}
- if (source is IIListProvider<TSource> listProv)
+#if !OPTIMIZE_FOR_SIZE
+ if (source is Iterator<TSource> iterator)
{
- int c = listProv.GetCount(onlyIfCheap: true);
+ int c = iterator.GetCount(onlyIfCheap: true);
if (c >= 0)
{
count = c;
return true;
}
}
+#endif
if (source is ICollection collection)
{
diff --git a/src/libraries/System.Linq/src/System/Linq/DefaultIfEmpty.SpeedOpt.cs b/src/libraries/System.Linq/src/System/Linq/DefaultIfEmpty.SpeedOpt.cs
index 24619cc43813..c89d6797581e 100644
--- a/src/libraries/System.Linq/src/System/Linq/DefaultIfEmpty.SpeedOpt.cs
+++ b/src/libraries/System.Linq/src/System/Linq/DefaultIfEmpty.SpeedOpt.cs
@@ -8,15 +8,15 @@ namespace System.Linq
{
public static partial class Enumerable
{
- private sealed partial class DefaultIfEmptyIterator<TSource> : IIListProvider<TSource>
+ private sealed partial class DefaultIfEmptyIterator<TSource>
{
- public TSource[] ToArray()
+ public override TSource[] ToArray()
{
TSource[] array = _source.ToArray();
return array.Length == 0 ? [_default] : array;
}
- public List<TSource> ToList()
+ public override List<TSource> ToList()
{
List<TSource> list = _source.ToList();
if (list.Count == 0)
@@ -27,7 +27,7 @@ namespace System.Linq
return list;
}
- public int GetCount(bool onlyIfCheap)
+ public override int GetCount(bool onlyIfCheap)
{
int count;
if (!onlyIfCheap || _source is ICollection<TSource> || _source is ICollection)
@@ -36,11 +36,51 @@ namespace System.Linq
}
else
{
- count = _source is IIListProvider<TSource> listProv ? listProv.GetCount(onlyIfCheap: true) : -1;
+ count = _source is Iterator<TSource> iterator ? iterator.GetCount(onlyIfCheap: true) : -1;
}
return count == 0 ? 1 : count;
}
+
+ public override TSource? TryGetFirst(out bool found)
+ {
+ TSource? first = _source.TryGetFirst(out found);
+ if (found)
+ {
+ return first;
+ }
+
+ found = true;
+ return _default;
+ }
+
+ public override TSource? TryGetLast(out bool found)
+ {
+ TSource? last = _source.TryGetLast(out found);
+ if (found)
+ {
+ return last;
+ }
+
+ found = true;
+ return _default;
+ }
+
+ public override TSource? TryGetElementAt(int index, out bool found)
+ {
+ TSource? item = _source.TryGetElementAt(index, out found);
+ if (found)
+ {
+ return item;
+ }
+
+ if (index == 0)
+ {
+ found = true;
+ }
+
+ return _default;
+ }
}
}
}
diff --git a/src/libraries/System.Linq/src/System/Linq/DefaultIfEmpty.cs b/src/libraries/System.Linq/src/System/Linq/DefaultIfEmpty.cs
index 593a6b8a67b3..b4bef955c078 100644
--- a/src/libraries/System.Linq/src/System/Linq/DefaultIfEmpty.cs
+++ b/src/libraries/System.Linq/src/System/Linq/DefaultIfEmpty.cs
@@ -18,6 +18,11 @@ namespace System.Linq
ThrowHelper.ThrowArgumentNullException(ExceptionArgument.source);
}
+ if (source is TSource[] { Length: > 0 })
+ {
+ return source;
+ }
+
return new DefaultIfEmptyIterator<TSource>(source, defaultValue);
}
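
The new pattern match returns a non-empty TSource[] unchanged, since DefaultIfEmpty over it would yield exactly the same elements; only empty sources need the wrapper. A sketch of the observable effect under this change, using the overload the hunk above modifies:

    using System;
    using System.Linq;

    class Demo
    {
        static void Main()
        {
            int[] nonEmpty = { 1, 2 };
            // Non-empty arrays come back as the same instance (no allocation).
            Console.WriteLine(ReferenceEquals(nonEmpty, nonEmpty.DefaultIfEmpty(0)));  // True
            Console.WriteLine(Array.Empty<int>().DefaultIfEmpty(-1).Single());         // -1
        }
    }
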
diff --git a/src/libraries/System.Linq/src/System/Linq/Distinct.SpeedOpt.cs b/src/libraries/System.Linq/src/System/Linq/Distinct.SpeedOpt.cs
index 70e96b7ed68f..a3dbb6458969 100644
--- a/src/libraries/System.Linq/src/System/Linq/Distinct.SpeedOpt.cs
+++ b/src/libraries/System.Linq/src/System/Linq/Distinct.SpeedOpt.cs
@@ -7,13 +7,15 @@ namespace System.Linq
{
public static partial class Enumerable
{
- private sealed partial class DistinctIterator<TSource> : IIListProvider<TSource>
+ private sealed partial class DistinctIterator<TSource>
{
- public TSource[] ToArray() => Enumerable.HashSetToArray(new HashSet<TSource>(_source, _comparer));
+ public override TSource[] ToArray() => HashSetToArray(new HashSet<TSource>(_source, _comparer));
- public List<TSource> ToList() => new List<TSource>(new HashSet<TSource>(_source, _comparer));
+ public override List<TSource> ToList() => new List<TSource>(new HashSet<TSource>(_source, _comparer));
- public int GetCount(bool onlyIfCheap) => onlyIfCheap ? -1 : new HashSet<TSource>(_source, _comparer).Count;
+ public override int GetCount(bool onlyIfCheap) => onlyIfCheap ? -1 : new HashSet<TSource>(_source, _comparer).Count;
+
+ public override TSource? TryGetFirst(out bool found) => _source.TryGetFirst(out found);
}
}
}
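
Distinct never removes the first element of its source, so TryGetFirst can delegate straight to the source instead of materializing a HashSet. Usage sketch:

    using System;
    using System.Linq;

    class Demo
    {
        static void Main()
        {
            int[] values = { 5, 5, 3, 5, 9 };
            // First() on Distinct() is simply the source's first element;
            // no hash set is built for this query shape.
            Console.WriteLine(values.Distinct().First());   // 5
        }
    }
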
diff --git a/src/libraries/System.Linq/src/System/Linq/ElementAt.cs b/src/libraries/System.Linq/src/System/Linq/ElementAt.cs
index b33fcaddff92..0466f7d9694b 100644
--- a/src/libraries/System.Linq/src/System/Linq/ElementAt.cs
+++ b/src/libraries/System.Linq/src/System/Linq/ElementAt.cs
@@ -16,25 +16,13 @@ namespace System.Linq
ThrowHelper.ThrowArgumentNullException(ExceptionArgument.source);
}
- if (source is IPartition<TSource> partition)
+ TSource? element = TryGetElementAt(source, index, out bool found, guardIListLength: false);
+ if (!found)
{
- TSource? element = partition.TryGetElementAt(index, out bool found);
- if (found)
- {
- return element!;
- }
- }
- else if (source is IList<TSource> list)
- {
- return list[index];
- }
- else if (TryGetElement(source, index, out TSource? element))
- {
- return element;
+ ThrowHelper.ThrowArgumentOutOfRangeException(ExceptionArgument.index);
}
- ThrowHelper.ThrowArgumentOutOfRangeException(ExceptionArgument.index);
- return default;
+ return element!;
}
/// <summary>Returns the element at a specified index in a sequence.</summary>
@@ -80,18 +68,7 @@ namespace System.Linq
ThrowHelper.ThrowArgumentNullException(ExceptionArgument.source);
}
- if (source is IPartition<TSource> partition)
- {
- return partition.TryGetElementAt(index, out bool _);
- }
-
- if (source is IList<TSource> list)
- {
- return (uint)index < (uint)list.Count ? list[index] : default;
- }
-
- TryGetElement(source, index, out TSource? element);
- return element;
+ return TryGetElementAt(source, index, out _);
}
/// <summary>Returns the element at a specified index in a sequence or a default value if the index is out of range.</summary>
@@ -125,27 +102,47 @@ namespace System.Linq
return element;
}
- private static bool TryGetElement<TSource>(IEnumerable<TSource> source, int index, [MaybeNullWhen(false)] out TSource element)
+ private static TSource? TryGetElementAt<TSource>(this IEnumerable<TSource> source, int index, out bool found, bool guardIListLength = true) =>
+#if !OPTIMIZE_FOR_SIZE
+ source is Iterator<TSource> iterator ? iterator.TryGetElementAt(index, out found) :
+#endif
+ TryGetElementAtNonIterator(source, index, out found, guardIListLength);
+
+ private static TSource? TryGetElementAtNonIterator<TSource>(IEnumerable<TSource> source, int index, out bool found, bool guardIListLength = true)
{
Debug.Assert(source != null);
- if (index >= 0)
+ if (source is IList<TSource> list)
+ {
+ // Historically, ElementAt would simply delegate to IList[int] without first checking the bounds.
+ // That in turn meant that whatever exception the IList[int] throws for out-of-bounds access would
+ // propagate, e.g. ImmutableArray throws IndexOutOfRangeException whereas List throws ArgumentOutOfRangeException.
+ // Other uses of this, though, do need to guard, such as ElementAtOrDefault and all the various
+ // internal TryGetElementAt helpers. So, we have a guardIListLength parameter to allow the caller
+ // to specify whether to guard or not.
+ if (!guardIListLength || (uint)index < (uint)list.Count)
+ {
+ found = true;
+ return list[index];
+ }
+ }
+ else if (index >= 0)
{
using IEnumerator<TSource> e = source.GetEnumerator();
while (e.MoveNext())
{
if (index == 0)
{
- element = e.Current;
- return true;
+ found = true;
+ return e.Current;
}
index--;
}
}
- element = default;
- return false;
+ found = false;
+ return default;
}
private static bool TryGetElementFromEnd<TSource>(IEnumerable<TSource> source, int indexFromEnd, [MaybeNullWhen(false)] out TSource element)
diff --git a/src/libraries/System.Linq/src/System/Linq/First.cs b/src/libraries/System.Linq/src/System/Linq/First.cs
index 1c62f547d9a0..6879be5fc3c8 100644
--- a/src/libraries/System.Linq/src/System/Linq/First.cs
+++ b/src/libraries/System.Linq/src/System/Linq/First.cs
@@ -2,7 +2,7 @@
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
-using System.Diagnostics.CodeAnalysis;
+using System.Diagnostics;
namespace System.Linq
{
@@ -69,11 +69,15 @@ namespace System.Linq
ThrowHelper.ThrowArgumentNullException(ExceptionArgument.source);
}
- if (source is IPartition<TSource> partition)
- {
- return partition.TryGetFirst(out found);
- }
+ return
+#if !OPTIMIZE_FOR_SIZE
+ source is Iterator<TSource> iterator ? iterator.TryGetFirst(out found) :
+#endif
+ TryGetFirstNonIterator(source, out found);
+ }
+ private static TSource? TryGetFirstNonIterator<TSource>(IEnumerable<TSource> source, out bool found)
+ {
if (source is IList<TSource> list)
{
if (list.Count > 0)
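
First now splits into the Iterator<TSource> probe (compiled out in size-optimized builds) and a shared non-iterator fallback; the visible context suggests the fallback keeps the familiar IList fast path before falling back to enumeration. A sketch of that fallback shape, reconstructed from the surrounding lines rather than copied from the file:

    using System.Collections.Generic;

    static class FirstSketch
    {
        public static T? TryGetFirst<T>(IEnumerable<T> source, out bool found)
        {
            if (source is IList<T> list)
            {
                if (list.Count > 0)
                {
                    found = true;
                    return list[0];
                }
            }
            else
            {
                using IEnumerator<T> e = source.GetEnumerator();
                if (e.MoveNext())
                {
                    found = true;
                    return e.Current;
                }
            }

            found = false;
            return default;
        }
    }
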
diff --git a/src/libraries/System.Linq/src/System/Linq/Grouping.SpeedOpt.cs b/src/libraries/System.Linq/src/System/Linq/Grouping.SpeedOpt.cs
index 97e3b1152139..d081a09380f9 100644
--- a/src/libraries/System.Linq/src/System/Linq/Grouping.SpeedOpt.cs
+++ b/src/libraries/System.Linq/src/System/Linq/Grouping.SpeedOpt.cs
@@ -5,63 +5,54 @@ using System.Collections.Generic;
namespace System.Linq
{
- internal sealed partial class GroupedResultEnumerable<TSource, TKey, TElement, TResult> : IIListProvider<TResult>
+ public static partial class Enumerable
{
- public TResult[] ToArray() =>
- Lookup<TKey, TElement>.Create(_source, _keySelector, _elementSelector, _comparer).ToArray(_resultSelector);
-
- public List<TResult> ToList() =>
- Lookup<TKey, TElement>.Create(_source, _keySelector, _elementSelector, _comparer).ToList(_resultSelector);
+ private sealed partial class GroupByResultIterator<TSource, TKey, TElement, TResult>
+ {
+ public override TResult[] ToArray() =>
+ Lookup<TKey, TElement>.Create(_source, _keySelector, _elementSelector, _comparer).ToArray(_resultSelector);
- public int GetCount(bool onlyIfCheap) =>
- onlyIfCheap ? -1 : Lookup<TKey, TElement>.Create(_source, _keySelector, _elementSelector, _comparer).Count;
- }
+ public override List<TResult> ToList() =>
+ Lookup<TKey, TElement>.Create(_source, _keySelector, _elementSelector, _comparer).ToList(_resultSelector);
- internal sealed partial class GroupedResultEnumerable<TSource, TKey, TResult> : IIListProvider<TResult>
- {
- public TResult[] ToArray() =>
- Lookup<TKey, TSource>.Create(_source, _keySelector, _comparer).ToArray(_resultSelector);
+ public override int GetCount(bool onlyIfCheap) =>
+ onlyIfCheap ? -1 : Lookup<TKey, TElement>.Create(_source, _keySelector, _elementSelector, _comparer).Count;
+ }
- public List<TResult> ToList() =>
- Lookup<TKey, TSource>.Create(_source, _keySelector, _comparer).ToList(_resultSelector);
+ private sealed partial class GroupByResultIterator<TSource, TKey, TResult>
+ {
+ public override TResult[] ToArray() =>
+ Lookup<TKey, TSource>.Create(_source, _keySelector, _comparer).ToArray(_resultSelector);
- public int GetCount(bool onlyIfCheap) =>
- onlyIfCheap ? -1 : Lookup<TKey, TSource>.Create(_source, _keySelector, _comparer).Count;
- }
+ public override List<TResult> ToList() =>
+ Lookup<TKey, TSource>.Create(_source, _keySelector, _comparer).ToList(_resultSelector);
- internal sealed partial class GroupedEnumerable<TSource, TKey, TElement> : IIListProvider<IGrouping<TKey, TElement>>
- {
- public IGrouping<TKey, TElement>[] ToArray()
- {
- IIListProvider<IGrouping<TKey, TElement>> lookup = Lookup<TKey, TElement>.Create(_source, _keySelector, _elementSelector, _comparer);
- return lookup.ToArray();
+ public override int GetCount(bool onlyIfCheap) =>
+ onlyIfCheap ? -1 : Lookup<TKey, TSource>.Create(_source, _keySelector, _comparer).Count;
}
- public List<IGrouping<TKey, TElement>> ToList()
+ private sealed partial class GroupByIterator<TSource, TKey, TElement>
{
- IIListProvider<IGrouping<TKey, TElement>> lookup = Lookup<TKey, TElement>.Create(_source, _keySelector, _elementSelector, _comparer);
- return lookup.ToList();
- }
+ public override IGrouping<TKey, TElement>[] ToArray() =>
+ Lookup<TKey, TElement>.Create(_source, _keySelector, _elementSelector, _comparer).ToArray();
- public int GetCount(bool onlyIfCheap) =>
- onlyIfCheap ? -1 : Lookup<TKey, TElement>.Create(_source, _keySelector, _elementSelector, _comparer).Count;
- }
+ public override List<IGrouping<TKey, TElement>> ToList() =>
+ Lookup<TKey, TElement>.Create(_source, _keySelector, _elementSelector, _comparer).ToList();
- internal sealed partial class GroupedEnumerable<TSource, TKey> : IIListProvider<IGrouping<TKey, TSource>>
- {
- public IGrouping<TKey, TSource>[] ToArray()
- {
- IIListProvider<IGrouping<TKey, TSource>> lookup = Lookup<TKey, TSource>.Create(_source, _keySelector, _comparer);
- return lookup.ToArray();
+ public override int GetCount(bool onlyIfCheap) =>
+ onlyIfCheap ? -1 : Lookup<TKey, TElement>.Create(_source, _keySelector, _elementSelector, _comparer).Count;
}
- public List<IGrouping<TKey, TSource>> ToList()
+ private sealed partial class GroupByIterator<TSource, TKey>
{
- IIListProvider<IGrouping<TKey, TSource>> lookup = Lookup<TKey, TSource>.Create(_source, _keySelector, _comparer);
- return lookup.ToList();
- }
+ public override IGrouping<TKey, TSource>[] ToArray() =>
+ Lookup<TKey, TSource>.Create(_source, _keySelector, _comparer).ToArray();
- public int GetCount(bool onlyIfCheap) =>
- onlyIfCheap ? -1 : Lookup<TKey, TSource>.Create(_source, _keySelector, _comparer).Count;
+ public override List<IGrouping<TKey, TSource>> ToList() =>
+ Lookup<TKey, TSource>.Create(_source, _keySelector, _comparer).ToList();
+
+ public override int GetCount(bool onlyIfCheap) =>
+ onlyIfCheap ? -1 : Lookup<TKey, TSource>.Create(_source, _keySelector, _comparer).Count;
+ }
}
}
diff --git a/src/libraries/System.Linq/src/System/Linq/Grouping.cs b/src/libraries/System.Linq/src/System/Linq/Grouping.cs
index 958642f624d0..b81b7909d4b8 100644
--- a/src/libraries/System.Linq/src/System/Linq/Grouping.cs
+++ b/src/libraries/System.Linq/src/System/Linq/Grouping.cs
@@ -29,7 +29,7 @@ namespace System.Linq
return [];
}
- return new GroupedEnumerable<TSource, TKey>(source, keySelector, comparer);
+ return new GroupByIterator<TSource, TKey>(source, keySelector, comparer);
}
public static IEnumerable<IGrouping<TKey, TElement>> GroupBy<TSource, TKey, TElement>(this IEnumerable<TSource> source, Func<TSource, TKey> keySelector, Func<TSource, TElement> elementSelector) =>
@@ -57,7 +57,7 @@ namespace System.Linq
return [];
}
- return new GroupedEnumerable<TSource, TKey, TElement>(source, keySelector, elementSelector, comparer);
+ return new GroupByIterator<TSource, TKey, TElement>(source, keySelector, elementSelector, comparer);
}
public static IEnumerable<TResult> GroupBy<TSource, TKey, TResult>(this IEnumerable<TSource> source, Func<TSource, TKey> keySelector, Func<TKey, IEnumerable<TSource>, TResult> resultSelector) =>
@@ -85,7 +85,7 @@ namespace System.Linq
return [];
}
- return new GroupedResultEnumerable<TSource, TKey, TResult>(source, keySelector, resultSelector, comparer);
+ return new GroupByResultIterator<TSource, TKey, TResult>(source, keySelector, resultSelector, comparer);
}
public static IEnumerable<TResult> GroupBy<TSource, TKey, TElement, TResult>(this IEnumerable<TSource> source, Func<TSource, TKey> keySelector, Func<TSource, TElement> elementSelector, Func<TKey, IEnumerable<TElement>, TResult> resultSelector) =>
@@ -118,7 +118,229 @@ namespace System.Linq
return [];
}
- return new GroupedResultEnumerable<TSource, TKey, TElement, TResult>(source, keySelector, elementSelector, resultSelector, comparer);
+ return new GroupByResultIterator<TSource, TKey, TElement, TResult>(source, keySelector, elementSelector, resultSelector, comparer);
+ }
+
+ private sealed partial class GroupByResultIterator<TSource, TKey, TElement, TResult> : Iterator<TResult>
+ {
+ private readonly IEnumerable<TSource> _source;
+ private readonly Func<TSource, TKey> _keySelector;
+ private readonly Func<TSource, TElement> _elementSelector;
+ private readonly IEqualityComparer<TKey>? _comparer;
+ private readonly Func<TKey, IEnumerable<TElement>, TResult> _resultSelector;
+
+ private Lookup<TKey, TElement>? _lookup;
+ private Grouping<TKey, TElement>? _g;
+
+ public GroupByResultIterator(IEnumerable<TSource> source, Func<TSource, TKey> keySelector, Func<TSource, TElement> elementSelector, Func<TKey, IEnumerable<TElement>, TResult> resultSelector, IEqualityComparer<TKey>? comparer)
+ {
+ _source = source;
+ _keySelector = keySelector;
+ _elementSelector = elementSelector;
+ _comparer = comparer;
+ _resultSelector = resultSelector;
+ }
+
+ public override Iterator<TResult> Clone() => new GroupByResultIterator<TSource, TKey, TElement, TResult>(_source, _keySelector, _elementSelector, _resultSelector, _comparer);
+
+ public override bool MoveNext()
+ {
+ switch (_state)
+ {
+ case 1:
+ _lookup = Lookup<TKey, TElement>.Create(_source, _keySelector, _elementSelector, _comparer);
+ _g = _lookup._lastGrouping;
+ if (_g is not null)
+ {
+ _state = 2;
+ goto ValidItem;
+ }
+ break;
+
+ case 2:
+ Debug.Assert(_g is not null);
+ Debug.Assert(_lookup is not null);
+ if (_g != _lookup._lastGrouping)
+ {
+ goto ValidItem;
+ }
+ break;
+ }
+
+ Dispose();
+ return false;
+
+ ValidItem:
+ _g = _g._next;
+ Debug.Assert(_g is not null);
+ _g.Trim();
+ _current = _resultSelector(_g.Key, _g._elements);
+ return true;
+ }
+ }
+
+ private sealed partial class GroupByResultIterator<TSource, TKey, TResult> : Iterator<TResult>
+ {
+ private readonly IEnumerable<TSource> _source;
+ private readonly Func<TSource, TKey> _keySelector;
+ private readonly IEqualityComparer<TKey>? _comparer;
+ private readonly Func<TKey, IEnumerable<TSource>, TResult> _resultSelector;
+
+ private Lookup<TKey, TSource>? _lookup;
+ private Grouping<TKey, TSource>? _g;
+
+ public GroupByResultIterator(IEnumerable<TSource> source, Func<TSource, TKey> keySelector, Func<TKey, IEnumerable<TSource>, TResult> resultSelector, IEqualityComparer<TKey>? comparer)
+ {
+ _source = source;
+ _keySelector = keySelector;
+ _resultSelector = resultSelector;
+ _comparer = comparer;
+ }
+
+ public override Iterator<TResult> Clone() => new GroupByResultIterator<TSource, TKey, TResult>(_source, _keySelector, _resultSelector, _comparer);
+
+ public override bool MoveNext()
+ {
+ switch (_state)
+ {
+ case 1:
+ _lookup = Lookup<TKey, TSource>.Create(_source, _keySelector, _comparer);
+ _g = _lookup._lastGrouping;
+ if (_g is not null)
+ {
+ _state = 2;
+ goto ValidItem;
+ }
+ break;
+
+ case 2:
+ Debug.Assert(_g is not null);
+ Debug.Assert(_lookup is not null);
+ if (_g != _lookup._lastGrouping)
+ {
+ goto ValidItem;
+ }
+ break;
+ }
+
+ Dispose();
+ return false;
+
+ ValidItem:
+ _g = _g._next;
+ Debug.Assert(_g is not null);
+ _g.Trim();
+ _current = _resultSelector(_g.Key, _g._elements);
+ return true;
+ }
+ }
+
+ private sealed partial class GroupByIterator<TSource, TKey, TElement> : Iterator<IGrouping<TKey, TElement>>
+ {
+ private readonly IEnumerable<TSource> _source;
+ private readonly Func<TSource, TKey> _keySelector;
+ private readonly Func<TSource, TElement> _elementSelector;
+ private readonly IEqualityComparer<TKey>? _comparer;
+
+ private Lookup<TKey, TElement>? _lookup;
+ private Grouping<TKey, TElement>? _g;
+
+ public GroupByIterator(IEnumerable<TSource> source, Func<TSource, TKey> keySelector, Func<TSource, TElement> elementSelector, IEqualityComparer<TKey>? comparer)
+ {
+ _source = source;
+ _keySelector = keySelector;
+ _elementSelector = elementSelector;
+ _comparer = comparer;
+ }
+
+ public override Iterator<IGrouping<TKey, TElement>> Clone() => new GroupByIterator<TSource, TKey, TElement>(_source, _keySelector, _elementSelector, _comparer);
+
+ public override bool MoveNext()
+ {
+ switch (_state)
+ {
+ case 1:
+ _lookup = Lookup<TKey, TElement>.Create(_source, _keySelector, _elementSelector, _comparer);
+ _g = _lookup._lastGrouping;
+ if (_g is not null)
+ {
+ _state = 2;
+ goto ValidItem;
+ }
+ break;
+
+ case 2:
+ Debug.Assert(_g is not null);
+ Debug.Assert(_lookup is not null);
+ if (_g != _lookup._lastGrouping)
+ {
+ goto ValidItem;
+ }
+ break;
+ }
+
+ Dispose();
+ return false;
+
+ ValidItem:
+ _g = _g._next;
+ Debug.Assert(_g is not null);
+ _current = _g;
+ return true;
+ }
+ }
+
+ private sealed partial class GroupByIterator<TSource, TKey> : Iterator<IGrouping<TKey, TSource>>
+ {
+ private readonly IEnumerable<TSource> _source;
+ private readonly Func<TSource, TKey> _keySelector;
+ private readonly IEqualityComparer<TKey>? _comparer;
+
+ private Lookup<TKey, TSource>? _lookup;
+ private Grouping<TKey, TSource>? _g;
+
+ public GroupByIterator(IEnumerable<TSource> source, Func<TSource, TKey> keySelector, IEqualityComparer<TKey>? comparer)
+ {
+ _source = source;
+ _keySelector = keySelector;
+ _comparer = comparer;
+ }
+
+ public override Iterator<IGrouping<TKey, TSource>> Clone() => new GroupByIterator<TSource, TKey>(_source, _keySelector, _comparer);
+
+ public override bool MoveNext()
+ {
+ switch (_state)
+ {
+ case 1:
+ _lookup = Lookup<TKey, TSource>.Create(_source, _keySelector, _comparer);
+ _g = _lookup._lastGrouping;
+ if (_g is not null)
+ {
+ _state = 2;
+ goto ValidItem;
+ }
+ break;
+
+ case 2:
+ Debug.Assert(_g is not null);
+ Debug.Assert(_lookup is not null);
+ if (_g != _lookup._lastGrouping)
+ {
+ goto ValidItem;
+ }
+ break;
+ }
+
+ Dispose();
+ return false;
+
+ ValidItem:
+ _g = _g._next;
+ Debug.Assert(_g is not null);
+ _current = _g;
+ return true;
+ }
}
}
@@ -127,15 +349,9 @@ namespace System.Linq
TKey Key { get; }
}
- // It is (unfortunately) common to databind directly to Grouping.Key.
- // Because of this, we have to declare this internal type public so that we
- // can mark the Key property for public reflection.
- //
- // To limit the damage, the toolchain makes this type appear in a hidden assembly.
- // (This is also why it is no longer a nested type of Lookup<,>).
[DebuggerDisplay("Key = {Key}")]
[DebuggerTypeProxy(typeof(SystemLinq_GroupingDebugView<,>))]
- public class Grouping<TKey, TElement> : IGrouping<TKey, TElement>, IList<TElement>
+ internal sealed class Grouping<TKey, TElement> : IGrouping<TKey, TElement>, IList<TElement>
{
internal readonly TKey _key;
internal readonly int _hashCode;
@@ -180,8 +396,6 @@ namespace System.Linq
IEnumerator IEnumerable.GetEnumerator() => GetEnumerator();
- // DDB195907: implement IGrouping<>.Key implicitly
- // so that WPF binding works on this property.
public TKey Key => _key;
int ICollection<TElement>.Count => _count;
@@ -227,94 +441,4 @@ namespace System.Linq
}
}
}
-
- internal sealed partial class GroupedResultEnumerable<TSource, TKey, TElement, TResult> : IEnumerable<TResult>
- {
- private readonly IEnumerable<TSource> _source;
- private readonly Func<TSource, TKey> _keySelector;
- private readonly Func<TSource, TElement> _elementSelector;
- private readonly IEqualityComparer<TKey>? _comparer;
- private readonly Func<TKey, IEnumerable<TElement>, TResult> _resultSelector;
-
- public GroupedResultEnumerable(IEnumerable<TSource> source, Func<TSource, TKey> keySelector, Func<TSource, TElement> elementSelector, Func<TKey, IEnumerable<TElement>, TResult> resultSelector, IEqualityComparer<TKey>? comparer)
- {
- _source = source;
- _keySelector = keySelector;
- _elementSelector = elementSelector;
- _comparer = comparer;
- _resultSelector = resultSelector;
- }
-
- public IEnumerator<TResult> GetEnumerator()
- {
- Lookup<TKey, TElement> lookup = Lookup<TKey, TElement>.Create(_source, _keySelector, _elementSelector, _comparer);
- return lookup.ApplyResultSelector(_resultSelector).GetEnumerator();
- }
-
- IEnumerator IEnumerable.GetEnumerator() => GetEnumerator();
- }
-
- internal sealed partial class GroupedResultEnumerable<TSource, TKey, TResult> : IEnumerable<TResult>
- {
- private readonly IEnumerable<TSource> _source;
- private readonly Func<TSource, TKey> _keySelector;
- private readonly IEqualityComparer<TKey>? _comparer;
- private readonly Func<TKey, IEnumerable<TSource>, TResult> _resultSelector;
-
- public GroupedResultEnumerable(IEnumerable<TSource> source, Func<TSource, TKey> keySelector, Func<TKey, IEnumerable<TSource>, TResult> resultSelector, IEqualityComparer<TKey>? comparer)
- {
- _source = source;
- _keySelector = keySelector;
- _resultSelector = resultSelector;
- _comparer = comparer;
- }
-
- public IEnumerator<TResult> GetEnumerator()
- {
- Lookup<TKey, TSource> lookup = Lookup<TKey, TSource>.Create(_source, _keySelector, _comparer);
- return lookup.ApplyResultSelector(_resultSelector).GetEnumerator();
- }
-
- IEnumerator IEnumerable.GetEnumerator() => GetEnumerator();
- }
-
- internal sealed partial class GroupedEnumerable<TSource, TKey, TElement> : IEnumerable<IGrouping<TKey, TElement>>
- {
- private readonly IEnumerable<TSource> _source;
- private readonly Func<TSource, TKey> _keySelector;
- private readonly Func<TSource, TElement> _elementSelector;
- private readonly IEqualityComparer<TKey>? _comparer;
-
- public GroupedEnumerable(IEnumerable<TSource> source, Func<TSource, TKey> keySelector, Func<TSource, TElement> elementSelector, IEqualityComparer<TKey>? comparer)
- {
- _source = source;
- _keySelector = keySelector;
- _elementSelector = elementSelector;
- _comparer = comparer;
- }
-
- public IEnumerator<IGrouping<TKey, TElement>> GetEnumerator() =>
- Lookup<TKey, TElement>.Create(_source, _keySelector, _elementSelector, _comparer).GetEnumerator();
-
- IEnumerator IEnumerable.GetEnumerator() => GetEnumerator();
- }
-
- internal sealed partial class GroupedEnumerable<TSource, TKey> : IEnumerable<IGrouping<TKey, TSource>>
- {
- private readonly IEnumerable<TSource> _source;
- private readonly Func<TSource, TKey> _keySelector;
- private readonly IEqualityComparer<TKey>? _comparer;
-
- public GroupedEnumerable(IEnumerable<TSource> source, Func<TSource, TKey> keySelector, IEqualityComparer<TKey>? comparer)
- {
- _source = source;
- _keySelector = keySelector;
- _comparer = comparer;
- }
-
- public IEnumerator<IGrouping<TKey, TSource>> GetEnumerator() =>
- Lookup<TKey, TSource>.Create(_source, _keySelector, _comparer).GetEnumerator();
-
- IEnumerator IEnumerable.GetEnumerator() => GetEnumerator();
- }
}
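
All four GroupBy iterators above walk the result of Lookup<,>.Create the same way: the lookup keeps its groupings in a circular singly-linked list whose tail is _lastGrouping, so enumeration advances to _lastGrouping._next (the head) first and stops once it wraps back around to _lastGrouping. A minimal sketch of that traversal over a stand-in node type:

    using System;

    sealed class Node
    {
        public string Key = "";
        public Node? Next;
    }

    class Demo
    {
        static void Main()
        {
            // Circular list a -> b -> c -> a, with the tail pointer at c.
            Node a = new() { Key = "a" }, b = new() { Key = "b" }, c = new() { Key = "c" };
            a.Next = b; b.Next = c; c.Next = a;
            Node last = c;

            Node g = last;
            do
            {
                g = g.Next!;               // advance first, so iteration begins at the head
                Console.WriteLine(g.Key);  // prints a, b, c
            }
            while (g != last);
        }
    }
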
diff --git a/src/libraries/System.Linq/src/System/Linq/IIListProvider.cs b/src/libraries/System.Linq/src/System/Linq/IIListProvider.cs
deleted file mode 100644
index 9eefc6e61e0c..000000000000
--- a/src/libraries/System.Linq/src/System/Linq/IIListProvider.cs
+++ /dev/null
@@ -1,33 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System.Collections.Generic;
-
-namespace System.Linq
-{
- /// <summary>
- /// An iterator that can produce an array or <see cref="List{TElement}"/> through an optimized path.
- /// </summary>
- internal interface IIListProvider<TElement> : IEnumerable<TElement>
- {
- /// <summary>
- /// Produce an array of the sequence through an optimized path.
- /// </summary>
- /// <returns>The array.</returns>
- TElement[] ToArray();
-
- /// <summary>
- /// Produce a <see cref="List{TElement}"/> of the sequence through an optimized path.
- /// </summary>
- /// <returns>The <see cref="List{TElement}"/>.</returns>
- List<TElement> ToList();
-
- /// <summary>
- /// Returns the count of elements in the sequence.
- /// </summary>
- /// <param name="onlyIfCheap">If true then the count should only be calculated if doing
- /// so is quick (sure or likely to be constant time), otherwise -1 should be returned.</param>
- /// <returns>The number of elements.</returns>
- int GetCount(bool onlyIfCheap);
- }
-}
diff --git a/src/libraries/System.Linq/src/System/Linq/IPartition.cs b/src/libraries/System.Linq/src/System/Linq/IPartition.cs
deleted file mode 100644
index 86db1921b12f..000000000000
--- a/src/libraries/System.Linq/src/System/Linq/IPartition.cs
+++ /dev/null
@@ -1,47 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-namespace System.Linq
-{
- /// <summary>
- /// An iterator that supports random access and can produce a partial sequence of its items through an optimized path.
- /// </summary>
- internal interface IPartition<TElement> : IIListProvider<TElement>
- {
- /// <summary>
- /// Creates a new partition that skips the specified number of elements from this sequence.
- /// </summary>
- /// <param name="count">The number of elements to skip.</param>
- /// <returns>An <see cref="IPartition{TElement}"/> with the first <paramref name="count"/> items removed, or null if known empty.</returns>
- IPartition<TElement>? Skip(int count);
-
- /// <summary>
- /// Creates a new partition that takes the specified number of elements from this sequence.
- /// </summary>
- /// <param name="count">The number of elements to take.</param>
- /// <returns>An <see cref="IPartition{TElement}"/> with only the first <paramref name="count"/> items, or null if known empty.</returns>
- IPartition<TElement>? Take(int count);
-
- /// <summary>
- /// Gets the item associated with a 0-based index in this sequence.
- /// </summary>
- /// <param name="index">The 0-based index to access.</param>
- /// <param name="found"><c>true</c> if the sequence contains an element at that index, <c>false</c> otherwise.</param>
- /// <returns>The element if <paramref name="found"/> is <c>true</c>, otherwise, the default value of <typeparamref name="TElement"/>.</returns>
- TElement? TryGetElementAt(int index, out bool found);
-
- /// <summary>
- /// Gets the first item in this sequence.
- /// </summary>
- /// <param name="found"><c>true</c> if the sequence contains an element, <c>false</c> otherwise.</param>
- /// <returns>The element if <paramref name="found"/> is <c>true</c>, otherwise, the default value of <typeparamref name="TElement"/>.</returns>
- TElement? TryGetFirst(out bool found);
-
- /// <summary>
- /// Gets the last item in this sequence.
- /// </summary>
- /// <param name="found"><c>true</c> if the sequence contains an element, <c>false</c> otherwise.</param>
- /// <returns>The element if <paramref name="found"/> is <c>true</c>, otherwise, the default value of <typeparamref name="TElement"/>.</returns>
- TElement? TryGetLast(out bool found);
- }
-}
diff --git a/src/libraries/System.Linq/src/System/Linq/Iterator.SpeedOpt.cs b/src/libraries/System.Linq/src/System/Linq/Iterator.SpeedOpt.cs
new file mode 100644
index 000000000000..d641faa93e3a
--- /dev/null
+++ b/src/libraries/System.Linq/src/System/Linq/Iterator.SpeedOpt.cs
@@ -0,0 +1,71 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Collections.Generic;
+
+namespace System.Linq
+{
+ public static partial class Enumerable
+ {
+ internal abstract partial class Iterator<TSource>
+ {
+ /// <summary>
+ /// Produce an array of the sequence through an optimized path.
+ /// </summary>
+ /// <returns>The array.</returns>
+ public abstract TSource[] ToArray();
+
+ /// <summary>
+ /// Produce a <see cref="List{TSource}"/> of the sequence through an optimized path.
+ /// </summary>
+ /// <returns>The <see cref="List{TSource}"/>.</returns>
+ public abstract List<TSource> ToList();
+
+ /// <summary>
+ /// Returns the count of elements in the sequence.
+ /// </summary>
+ /// <param name="onlyIfCheap">If true then the count should only be calculated if doing
+ /// so is quick (sure or likely to be constant time), otherwise -1 should be returned.</param>
+ /// <returns>The number of elements.</returns>
+ public abstract int GetCount(bool onlyIfCheap);
+
+ /// <summary>
+ /// Creates a new iterator that skips the specified number of elements from this sequence.
+ /// </summary>
+ /// <param name="count">The number of elements to skip.</param>
+ /// <returns>An <see cref="Iterator{TSource}"/> with the first <paramref name="count"/> items removed, or null if known empty.</returns>
+ public virtual Iterator<TSource>? Skip(int count) => new IEnumerableSkipTakeIterator<TSource>(this, count, -1);
+
+ /// <summary>
+ /// Creates a new iterator that takes the specified number of elements from this sequence.
+ /// </summary>
+ /// <param name="count">The number of elements to take.</param>
+ /// <returns>An <see cref="Iterator{TSource}"/> with only the first <paramref name="count"/> items, or null if known empty.</returns>
+ public virtual Iterator<TSource>? Take(int count) => new IEnumerableSkipTakeIterator<TSource>(this, 0, count - 1);
+
+ /// <summary>
+ /// Gets the item associated with a 0-based index in this sequence.
+ /// </summary>
+ /// <param name="index">The 0-based index to access.</param>
+ /// <param name="found"><c>true</c> if the sequence contains an element at that index, <c>false</c> otherwise.</param>
+ /// <returns>The element if <paramref name="found"/> is <c>true</c>, otherwise, the default value of <typeparamref name="TSource"/>.</returns>
+ public virtual TSource? TryGetElementAt(int index, out bool found) =>
+ index == 0 ? TryGetFirst(out found) :
+ TryGetElementAtNonIterator(this, index, out found);
+
+ /// <summary>
+ /// Gets the first item in this sequence.
+ /// </summary>
+ /// <param name="found"><c>true</c> if the sequence contains an element, <c>false</c> otherwise.</param>
+ /// <returns>The element if <paramref name="found"/> is <c>true</c>, otherwise, the default value of <typeparamref name="TSource"/>.</returns>
+ public virtual TSource? TryGetFirst(out bool found) => TryGetFirstNonIterator(this, out found);
+
+ /// <summary>
+ /// Gets the last item in this sequence.
+ /// </summary>
+ /// <param name="found"><c>true</c> if the sequence contains an element, <c>false</c> otherwise.</param>
+ /// <returns>The element if <paramref name="found"/> is <c>true</c>, otherwise, the default value of <typeparamref name="TSource"/>.</returns>
+ public virtual TSource? TryGetLast(out bool found) => TryGetLastNonIterator(this, out found);
+ }
+ }
+}
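
As a usage-level sketch of what this new surface enables (illustrative only; the calls below are standard public LINQ, and the fast-path notes in the comments are inferences from the overrides above, not guarantees):

using System;
using System.Linq;

class IteratorFusionSketch
{
    static void Main()
    {
        int[] source = { 10, 20, 30, 40, 50 };

        // Select over an array yields an internal Iterator<T>; the chained Skip/Take
        // are presumably answered by its virtual overrides rather than by wrapping a
        // generic enumerator around each operator.
        var window = source.Select(x => x * 2).Skip(1).Take(2);

        Console.WriteLine(string.Join(", ", window)); // 40, 60
        Console.WriteLine(window.First());            // 40 (TryGetFirst fast path)
        Console.WriteLine(window.Count());            // 2  (GetCount fast path)
    }
}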
diff --git a/src/libraries/System.Linq/src/System/Linq/Iterator.cs b/src/libraries/System.Linq/src/System/Linq/Iterator.cs
index b9e8c7b58c05..4738d2343069 100644
--- a/src/libraries/System.Linq/src/System/Linq/Iterator.cs
+++ b/src/libraries/System.Linq/src/System/Linq/Iterator.cs
@@ -28,21 +28,14 @@ namespace System.Linq
/// </description></item>
/// </list>
/// </remarks>
- internal abstract class Iterator<TSource> : IEnumerable<TSource>, IEnumerator<TSource>
+ internal abstract partial class Iterator<TSource> : IEnumerable<TSource>, IEnumerator<TSource>
{
- private readonly int _threadId;
+ private readonly int _threadId = Environment.CurrentManagedThreadId;
+
internal int _state;
internal TSource _current = default!;
/// <summary>
- /// Initializes a new instance of the <see cref="Iterator{TSource}"/> class.
- /// </summary>
- protected Iterator()
- {
- _threadId = Environment.CurrentManagedThreadId;
- }
-
- /// <summary>
/// The item currently yielded by this iterator.
/// </summary>
public TSource Current => _current;
@@ -94,19 +87,20 @@ namespace System.Linq
/// </summary>
/// <typeparam name="TResult">The type of the mapped items.</typeparam>
/// <param name="selector">The selector used to map each item.</param>
- public virtual IEnumerable<TResult> Select<TResult>(Func<TSource, TResult> selector)
- {
- return new SelectEnumerableIterator<TSource, TResult>(this, selector);
- }
+ public virtual IEnumerable<TResult> Select<TResult>(Func<TSource, TResult> selector) =>
+#if OPTIMIZE_FOR_SIZE
+ new IEnumerableSelectIterator<TSource, TResult>(this, selector);
+#else
+ new IteratorSelectIterator<TSource, TResult>(this, selector);
+#endif
+
/// <summary>
/// Returns an enumerable that filters each item in this iterator based on a predicate.
/// </summary>
/// <param name="predicate">The predicate used to filter each item.</param>
- public virtual IEnumerable<TSource> Where(Func<TSource, bool> predicate)
- {
- return new WhereEnumerableIterator<TSource>(this, predicate);
- }
+ public virtual IEnumerable<TSource> Where(Func<TSource, bool> predicate) =>
+ new IEnumerableWhereIterator<TSource>(this, predicate);
object? IEnumerator.Current => Current;
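
A minimal compilable sketch of the thread-affinity trick behind the inlined _threadId initializer (TinyIterator and RangeIterator are hypothetical stand-ins, heavily simplified from the real Iterator<TSource>):

using System;
using System.Collections;
using System.Collections.Generic;

abstract class TinyIterator<T> : IEnumerable<T>, IEnumerator<T>
{
    private readonly int _threadId = Environment.CurrentManagedThreadId;
    protected int _state;
    protected T _current = default!;

    public abstract TinyIterator<T> Clone();
    public abstract bool MoveNext();

    public IEnumerator<T> GetEnumerator()
    {
        // Serve as our own enumerator for the first enumeration on the creating
        // thread; hand out an independent clone for every other request.
        TinyIterator<T> e =
            _state == 0 && _threadId == Environment.CurrentManagedThreadId ? this : Clone();
        e._state = 1;
        return e;
    }

    public T Current => _current;
    object? IEnumerator.Current => Current;
    IEnumerator IEnumerable.GetEnumerator() => GetEnumerator();
    public void Reset() => throw new NotSupportedException();
    public virtual void Dispose() => _state = -1;
}

sealed class RangeIterator : TinyIterator<int>
{
    private readonly int _count;
    public RangeIterator(int count) => _count = count;
    public override TinyIterator<int> Clone() => new RangeIterator(_count);
    public override bool MoveNext()
    {
        if (_state >= 1 && _state <= _count) { _current = _state - 1; _state++; return true; }
        Dispose();
        return false;
    }
}

class ThreadAffinityDemo
{
    static void Main()
    {
        var seq = new RangeIterator(3);
        Console.WriteLine(string.Join(", ", seq)); // 0, 1, 2 -- reuses 'seq' itself
        Console.WriteLine(string.Join(", ", seq)); // 0, 1, 2 -- second pass gets a clone
    }
}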
diff --git a/src/libraries/System.Linq/src/System/Linq/Last.cs b/src/libraries/System.Linq/src/System/Linq/Last.cs
index 568f0d8670fa..e7052c2b48dd 100644
--- a/src/libraries/System.Linq/src/System/Linq/Last.cs
+++ b/src/libraries/System.Linq/src/System/Linq/Last.cs
@@ -68,11 +68,15 @@ namespace System.Linq
ThrowHelper.ThrowArgumentNullException(ExceptionArgument.source);
}
- if (source is IPartition<TSource> partition)
- {
- return partition.TryGetLast(out found);
- }
+ return
+#if !OPTIMIZE_FOR_SIZE
+ source is Iterator<TSource> iterator ? iterator.TryGetLast(out found) :
+#endif
+ TryGetLastNonIterator(source, out found);
+ }
+ private static TSource? TryGetLastNonIterator<TSource>(IEnumerable<TSource> source, out bool found)
+ {
if (source is IList<TSource> list)
{
int count = list.Count;
@@ -117,7 +121,7 @@ namespace System.Linq
ThrowHelper.ThrowArgumentNullException(ExceptionArgument.predicate);
}
- if (source is OrderedEnumerable<TSource> ordered)
+ if (source is OrderedIterator<TSource> ordered)
{
return ordered.TryGetLast(predicate, out found);
}
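
A hedged sketch of the non-iterator fallback's shape (TryGetLastSketch is a hypothetical name; the real TryGetLastNonIterator above is what the #if branch falls back to): IList<TSource> gets O(1) indexing from the end, everything else a full scan.

using System.Collections.Generic;

static class LastDispatchSketch
{
    public static T? TryGetLastSketch<T>(IEnumerable<T> source, out bool found)
    {
        // Lists expose Count and an indexer, so the last element is O(1).
        if (source is IList<T> list)
        {
            int count = list.Count;
            found = count > 0;
            return found ? list[count - 1] : default;
        }

        // Otherwise remember the most recent element while scanning to the end.
        found = false;
        T? last = default;
        foreach (T item in source)
        {
            last = item;
            found = true;
        }
        return last;
    }
}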
diff --git a/src/libraries/System.Linq/src/System/Linq/Lookup.SpeedOpt.cs b/src/libraries/System.Linq/src/System/Linq/Lookup.SpeedOpt.cs
index 16a9d7e0a3f4..3a50ac59fd4f 100644
--- a/src/libraries/System.Linq/src/System/Linq/Lookup.SpeedOpt.cs
+++ b/src/libraries/System.Linq/src/System/Linq/Lookup.SpeedOpt.cs
@@ -6,23 +6,8 @@ using System.Diagnostics;
namespace System.Linq
{
- public partial class Lookup<TKey, TElement> : IIListProvider<IGrouping<TKey, TElement>>
+ public partial class Lookup<TKey, TElement>
{
- IGrouping<TKey, TElement>[] IIListProvider<IGrouping<TKey, TElement>>.ToArray()
- {
- IGrouping<TKey, TElement>[] array;
- if (_count > 0)
- {
- array = new IGrouping<TKey, TElement>[_count];
- Fill(_lastGrouping, array);
- }
- else
- {
- array = [];
- }
- return array;
- }
-
internal TResult[] ToArray<TResult>(Func<TKey, IEnumerable<TElement>, TResult> resultSelector)
{
TResult[] array = new TResult[_count];
@@ -44,38 +29,5 @@ namespace System.Linq
return array;
}
-
- List<IGrouping<TKey, TElement>> IIListProvider<IGrouping<TKey, TElement>>.ToList()
- {
- var list = new List<IGrouping<TKey, TElement>>(_count);
- if (_count > 0)
- {
- Fill(_lastGrouping, Enumerable.SetCountAndGetSpan(list, _count));
- }
-
- return list;
- }
-
- private static void Fill(Grouping<TKey, TElement>? lastGrouping, Span<IGrouping<TKey, TElement>> results)
- {
- int index = 0;
- Grouping<TKey, TElement>? g = lastGrouping;
- if (g != null)
- {
- do
- {
- g = g._next;
- Debug.Assert(g != null);
-
- results[index] = g;
- ++index;
- }
- while (g != lastGrouping);
- }
-
- Debug.Assert(index == results.Length, "All list elements were not initialized.");
- }
-
- int IIListProvider<IGrouping<TKey, TElement>>.GetCount(bool onlyIfCheap) => _count;
}
}
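
The surviving ToArray(resultSelector) above plausibly backs the public GroupBy overloads that take a result selector; a small usage illustration (standard public API only, nothing specific to this diff):

using System;
using System.Linq;

class LookupResultSelectorDemo
{
    static void Main()
    {
        string[] words = { "apple", "avocado", "banana", "blueberry", "cherry" };

        // GroupBy(key, resultSelector) materializes one result per grouping.
        string[] summary = words
            .GroupBy(w => w[0], (key, group) => $"{key}: {group.Count()}")
            .ToArray();

        Console.WriteLine(string.Join(Environment.NewLine, summary));
        // a: 2
        // b: 2
        // c: 1
    }
}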
diff --git a/src/libraries/System.Linq/src/System/Linq/Lookup.cs b/src/libraries/System.Linq/src/System/Linq/Lookup.cs
index 055bf6c61018..2b7fbb20b859 100644
--- a/src/libraries/System.Linq/src/System/Linq/Lookup.cs
+++ b/src/libraries/System.Linq/src/System/Linq/Lookup.cs
@@ -76,7 +76,7 @@ namespace System.Linq
{
private readonly IEqualityComparer<TKey> _comparer;
private Grouping<TKey, TElement>[] _groupings;
- private Grouping<TKey, TElement>? _lastGrouping;
+ internal Grouping<TKey, TElement>? _lastGrouping;
private int _count;
internal static Lookup<TKey, TElement> Create<TSource>(IEnumerable<TSource> source, Func<TSource, TKey> keySelector, Func<TSource, TElement> elementSelector, IEqualityComparer<TKey>? comparer)
@@ -85,7 +85,7 @@ namespace System.Linq
Debug.Assert(keySelector != null);
Debug.Assert(elementSelector != null);
- Lookup<TKey, TElement> lookup = new Lookup<TKey, TElement>(comparer);
+ var lookup = new CollectionLookup<TKey, TElement>(comparer);
foreach (TSource item in source)
{
lookup.GetGrouping(keySelector(item), create: true)!.Add(elementSelector(item));
@@ -99,7 +99,7 @@ namespace System.Linq
Debug.Assert(source != null);
Debug.Assert(keySelector != null);
- Lookup<TKey, TElement> lookup = new Lookup<TKey, TElement>(comparer);
+ var lookup = new CollectionLookup<TKey, TElement>(comparer);
foreach (TElement item in source)
{
lookup.GetGrouping(keySelector(item), create: true)!.Add(item);
@@ -110,7 +110,7 @@ namespace System.Linq
internal static Lookup<TKey, TElement> CreateForJoin(IEnumerable<TElement> source, Func<TElement, TKey> keySelector, IEqualityComparer<TKey>? comparer)
{
- Lookup<TKey, TElement> lookup = new Lookup<TKey, TElement>(comparer);
+ var lookup = new CollectionLookup<TKey, TElement>(comparer);
foreach (TElement item in source)
{
TKey key = keySelector(item);
@@ -123,7 +123,7 @@ namespace System.Linq
return lookup;
}
- private Lookup(IEqualityComparer<TKey>? comparer)
+ private protected Lookup(IEqualityComparer<TKey>? comparer)
{
_comparer = comparer ?? EqualityComparer<TKey>.Default;
_groupings = new Grouping<TKey, TElement>[7];
@@ -259,16 +259,68 @@ namespace System.Linq
}
}
+ internal sealed class CollectionLookup<TKey, TElement> : Lookup<TKey, TElement>, ICollection<IGrouping<TKey, TElement>>, IReadOnlyCollection<IGrouping<TKey, TElement>>
+ {
+ internal CollectionLookup(IEqualityComparer<TKey>? comparer) : base(comparer) { }
+
+ void ICollection<IGrouping<TKey, TElement>>.CopyTo(IGrouping<TKey, TElement>[] array, int arrayIndex)
+ {
+ ArgumentNullException.ThrowIfNull(array);
+ ArgumentOutOfRangeException.ThrowIfNegative(arrayIndex);
+ ArgumentOutOfRangeException.ThrowIfGreaterThan(arrayIndex, array.Length);
+ ArgumentOutOfRangeException.ThrowIfLessThan(array.Length - arrayIndex, Count, nameof(arrayIndex));
+
+ Grouping<TKey, TElement>? g = _lastGrouping;
+ if (g != null)
+ {
+ do
+ {
+ g = g._next;
+ Debug.Assert(g != null);
+
+ array[arrayIndex] = g;
+ ++arrayIndex;
+ }
+ while (g != _lastGrouping);
+ }
+ }
+
+ bool ICollection<IGrouping<TKey, TElement>>.Contains(IGrouping<TKey, TElement> item)
+ {
+ ArgumentNullException.ThrowIfNull(item);
+ return GetGrouping(item.Key, create: false) is { } grouping && grouping == item;
+ }
+
+ bool ICollection<IGrouping<TKey, TElement>>.IsReadOnly => true;
+ void ICollection<IGrouping<TKey, TElement>>.Add(IGrouping<TKey, TElement> item) => throw new NotSupportedException();
+ void ICollection<IGrouping<TKey, TElement>>.Clear() => throw new NotSupportedException();
+ bool ICollection<IGrouping<TKey, TElement>>.Remove(IGrouping<TKey, TElement> item) => throw new NotSupportedException();
+ }
+
[DebuggerDisplay("Count = 0")]
[DebuggerTypeProxy(typeof(SystemLinq_LookupDebugView<,>))]
- internal sealed class EmptyLookup<TKey, TElement> : ILookup<TKey, TElement>
+ internal sealed class EmptyLookup<TKey, TElement> : ILookup<TKey, TElement>, ICollection<IGrouping<TKey, TElement>>, IReadOnlyCollection<IGrouping<TKey, TElement>>
{
public static readonly EmptyLookup<TKey, TElement> Instance = new();
public IEnumerable<TElement> this[TKey key] => [];
public int Count => 0;
- public bool Contains(TKey key) => false;
+
public IEnumerator<IGrouping<TKey, TElement>> GetEnumerator() => Enumerable.Empty<IGrouping<TKey, TElement>>().GetEnumerator();
IEnumerator IEnumerable.GetEnumerator() => GetEnumerator();
+
+ public bool Contains(TKey key) => false;
+ public bool Contains(IGrouping<TKey, TElement> item) => false;
+ public void CopyTo(IGrouping<TKey, TElement>[] array, int arrayIndex)
+ {
+ ArgumentNullException.ThrowIfNull(array);
+ ArgumentOutOfRangeException.ThrowIfNegative(arrayIndex);
+ ArgumentOutOfRangeException.ThrowIfGreaterThan(arrayIndex, array.Length);
+ }
+
+ public bool IsReadOnly => true;
+ public void Add(IGrouping<TKey, TElement> item) => throw new NotSupportedException();
+ public void Clear() => throw new NotSupportedException();
+ public bool Remove(IGrouping<TKey, TElement> item) => throw new NotSupportedException();
}
}
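
A usage-level illustration of why these ICollection implementations matter (the bulk-copy claim is an inference from how List<T> and Enumerable.Count treat ICollection<T>, not something stated in the diff):

using System;
using System.Collections.Generic;
using System.Linq;

class CollectionLookupDemo
{
    static void Main()
    {
        ILookup<int, string> lookup = new[] { "a", "bb", "cc", "ddd" }
            .ToLookup(s => s.Length);

        // Count comes straight from the lookup rather than from enumeration.
        Console.WriteLine(lookup.Count); // 3

        // List<T>'s IEnumerable constructor detects ICollection<T>, so it can size
        // its backing array once and CopyTo the groupings in bulk.
        List<IGrouping<int, string>> groups = lookup.ToList();
        Console.WriteLine(groups[0].Key); // 1
    }
}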
diff --git a/src/libraries/System.Linq/src/System/Linq/OrderBy.cs b/src/libraries/System.Linq/src/System/Linq/OrderBy.cs
index aa7a08ee81c9..700410fe579f 100644
--- a/src/libraries/System.Linq/src/System/Linq/OrderBy.cs
+++ b/src/libraries/System.Linq/src/System/Linq/OrderBy.cs
@@ -44,14 +44,14 @@ namespace System.Linq
/// </remarks>
public static IOrderedEnumerable<T> Order<T>(this IEnumerable<T> source, IComparer<T>? comparer) =>
TypeIsImplicitlyStable<T>() && (comparer is null || comparer == Comparer<T>.Default) ?
- new OrderedImplicitlyStableEnumerable<T>(source, descending: false) :
+ new ImplicitlyStableOrderedIterator<T>(source, descending: false) :
OrderBy(source, EnumerableSorter<T>.IdentityFunc, comparer);
public static IOrderedEnumerable<TSource> OrderBy<TSource, TKey>(this IEnumerable<TSource> source, Func<TSource, TKey> keySelector)
- => new OrderedEnumerable<TSource, TKey>(source, keySelector, null, false, null);
+ => new OrderedIterator<TSource, TKey>(source, keySelector, null, false, null);
public static IOrderedEnumerable<TSource> OrderBy<TSource, TKey>(this IEnumerable<TSource> source, Func<TSource, TKey> keySelector, IComparer<TKey>? comparer)
- => new OrderedEnumerable<TSource, TKey>(source, keySelector, comparer, false, null);
+ => new OrderedIterator<TSource, TKey>(source, keySelector, comparer, false, null);
/// <summary>
/// Sorts the elements of a sequence in descending order.
@@ -89,14 +89,14 @@ namespace System.Linq
/// </remarks>
public static IOrderedEnumerable<T> OrderDescending<T>(this IEnumerable<T> source, IComparer<T>? comparer) =>
TypeIsImplicitlyStable<T>() && (comparer is null || comparer == Comparer<T>.Default) ?
- new OrderedImplicitlyStableEnumerable<T>(source, descending: true) :
+ new ImplicitlyStableOrderedIterator<T>(source, descending: true) :
OrderByDescending(source, EnumerableSorter<T>.IdentityFunc, comparer);
public static IOrderedEnumerable<TSource> OrderByDescending<TSource, TKey>(this IEnumerable<TSource> source, Func<TSource, TKey> keySelector) =>
- new OrderedEnumerable<TSource, TKey>(source, keySelector, null, true, null);
+ new OrderedIterator<TSource, TKey>(source, keySelector, null, true, null);
public static IOrderedEnumerable<TSource> OrderByDescending<TSource, TKey>(this IEnumerable<TSource> source, Func<TSource, TKey> keySelector, IComparer<TKey>? comparer) =>
- new OrderedEnumerable<TSource, TKey>(source, keySelector, comparer, true, null);
+ new OrderedIterator<TSource, TKey>(source, keySelector, comparer, true, null);
public static IOrderedEnumerable<TSource> ThenBy<TSource, TKey>(this IOrderedEnumerable<TSource> source, Func<TSource, TKey> keySelector)
{
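
To make the rename concrete, a short sketch of the "implicitly stable" idea (TypeIsImplicitlyStable<T> is internal, so this shows only the observable effect, under the assumption that equal values of types like int are bitwise identical):

using System;
using System.Linq;

class ImplicitStabilityDemo
{
    static void Main()
    {
        int[] values = { 3, 1, 2, 1 };

        // Equal ints cannot be told apart, so an unstable Span.Sort behind
        // Order() is observationally stable here.
        Console.WriteLine(string.Join(", ", values.Order())); // 1, 1, 2, 3

        // With a projected key, stability is observable, so OrderBy keeps the
        // comprehensive stable-sort machinery instead.
        var pairs = new[] { (Key: 1, Tag: "first"), (Key: 1, Tag: "second") };
        Console.WriteLine(pairs.OrderBy(p => p.Key).First().Tag); // first
    }
}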
diff --git a/src/libraries/System.Linq/src/System/Linq/OrderedEnumerable.SpeedOpt.cs b/src/libraries/System.Linq/src/System/Linq/OrderedEnumerable.SpeedOpt.cs
index 615c196cced3..ffa533dd84e6 100644
--- a/src/libraries/System.Linq/src/System/Linq/OrderedEnumerable.SpeedOpt.cs
+++ b/src/libraries/System.Linq/src/System/Linq/OrderedEnumerable.SpeedOpt.cs
@@ -8,428 +8,539 @@ using System.Runtime.InteropServices;
namespace System.Linq
{
- internal abstract partial class OrderedEnumerable<TElement> : IPartition<TElement>
+ public static partial class Enumerable
{
- public virtual TElement[] ToArray()
+ internal abstract partial class OrderedIterator<TElement>
{
- TElement[] buffer = _source.ToArray();
- if (buffer.Length == 0)
+ public override TElement[] ToArray()
{
- return buffer;
- }
-
- TElement[] array = new TElement[buffer.Length];
- Fill(buffer, array);
- return array;
- }
+ TElement[] buffer = _source.ToArray();
+ if (buffer.Length == 0)
+ {
+ return buffer;
+ }
- public virtual List<TElement> ToList()
- {
- TElement[] buffer = _source.ToArray();
+ TElement[] array = new TElement[buffer.Length];
+ Fill(buffer, array);
+ return array;
+ }
- List<TElement> list = new();
- if (buffer.Length > 0)
+ public override List<TElement> ToList()
{
- Fill(buffer, Enumerable.SetCountAndGetSpan(list, buffer.Length));
- }
+ TElement[] buffer = _source.ToArray();
- return list;
- }
+ List<TElement> list = new();
+ if (buffer.Length > 0)
+ {
+ Fill(buffer, SetCountAndGetSpan(list, buffer.Length));
+ }
- private void Fill(TElement[] buffer, Span<TElement> destination)
- {
- int[] map = SortedMap(buffer);
- for (int i = 0; i < destination.Length; i++)
- {
- destination[i] = buffer[map[i]];
+ return list;
}
- }
- public int GetCount(bool onlyIfCheap)
- {
- if (_source is IIListProvider<TElement> listProv)
+ private void Fill(TElement[] buffer, Span<TElement> destination)
{
- return listProv.GetCount(onlyIfCheap);
+ int[] map = SortedMap(buffer);
+ for (int i = 0; i < destination.Length; i++)
+ {
+ destination[i] = buffer[map[i]];
+ }
}
- return !onlyIfCheap || _source is ICollection<TElement> || _source is ICollection ? _source.Count() : -1;
- }
-
- internal TElement[] ToArray(int minIdx, int maxIdx)
- {
- TElement[] buffer = _source.ToArray();
- if (buffer.Length <= minIdx)
+ public override int GetCount(bool onlyIfCheap)
{
- return [];
- }
+ if (_source is Iterator<TElement> iterator)
+ {
+ return iterator.GetCount(onlyIfCheap);
+ }
- if (buffer.Length <= maxIdx)
- {
- maxIdx = buffer.Length - 1;
+ return !onlyIfCheap || _source is ICollection<TElement> || _source is ICollection ? _source.Count() : -1;
}
- if (minIdx == maxIdx)
+ internal TElement[] ToArray(int minIdx, int maxIdx)
{
- return [GetEnumerableSorter().ElementAt(buffer, buffer.Length, minIdx)];
- }
+ TElement[] buffer = _source.ToArray();
+ if (buffer.Length <= minIdx)
+ {
+ return [];
+ }
+
+ if (buffer.Length <= maxIdx)
+ {
+ maxIdx = buffer.Length - 1;
+ }
- TElement[] array = new TElement[maxIdx - minIdx + 1];
+ if (minIdx == maxIdx)
+ {
+ return [GetEnumerableSorter().ElementAt(buffer, buffer.Length, minIdx)];
+ }
- Fill(minIdx, maxIdx, buffer, array);
+ TElement[] array = new TElement[maxIdx - minIdx + 1];
- return array;
- }
+ Fill(minIdx, maxIdx, buffer, array);
- internal List<TElement> ToList(int minIdx, int maxIdx)
- {
- TElement[] buffer = _source.ToArray();
- if (buffer.Length <= minIdx)
- {
- return new List<TElement>();
+ return array;
}
- if (buffer.Length <= maxIdx)
+ internal List<TElement> ToList(int minIdx, int maxIdx)
{
- maxIdx = buffer.Length - 1;
- }
+ TElement[] buffer = _source.ToArray();
+ if (buffer.Length <= minIdx)
+ {
+ return new List<TElement>();
+ }
- if (minIdx == maxIdx)
- {
- return new List<TElement>(1) { GetEnumerableSorter().ElementAt(buffer, buffer.Length, minIdx) };
- }
+ if (buffer.Length <= maxIdx)
+ {
+ maxIdx = buffer.Length - 1;
+ }
- List<TElement> list = new();
- Fill(minIdx, maxIdx, buffer, Enumerable.SetCountAndGetSpan(list, maxIdx - minIdx + 1));
- return list;
- }
+ if (minIdx == maxIdx)
+ {
+ return new List<TElement>(1) { GetEnumerableSorter().ElementAt(buffer, buffer.Length, minIdx) };
+ }
- private void Fill(int minIdx, int maxIdx, TElement[] buffer, Span<TElement> destination)
- {
- int[] map = SortedMap(buffer, minIdx, maxIdx);
- int idx = 0;
- while (minIdx <= maxIdx)
- {
- destination[idx] = buffer[map[minIdx]];
- ++idx;
- ++minIdx;
+ List<TElement> list = new();
+ Fill(minIdx, maxIdx, buffer, SetCountAndGetSpan(list, maxIdx - minIdx + 1));
+ return list;
}
- }
- internal int GetCount(int minIdx, int maxIdx, bool onlyIfCheap)
- {
- int count = GetCount(onlyIfCheap);
- if (count <= 0)
+ private void Fill(int minIdx, int maxIdx, TElement[] buffer, Span<TElement> destination)
{
- return count;
+ int[] map = SortedMap(buffer, minIdx, maxIdx);
+ int idx = 0;
+ while (minIdx <= maxIdx)
+ {
+ destination[idx] = buffer[map[minIdx]];
+ ++idx;
+ ++minIdx;
+ }
}
- if (count <= minIdx)
+ internal int GetCount(int minIdx, int maxIdx, bool onlyIfCheap)
{
- return 0;
- }
+ int count = GetCount(onlyIfCheap);
+ if (count <= 0)
+ {
+ return count;
+ }
- return (count <= maxIdx ? count : maxIdx + 1) - minIdx;
- }
+ if (count <= minIdx)
+ {
+ return 0;
+ }
+
+ return (count <= maxIdx ? count : maxIdx + 1) - minIdx;
+ }
- public IPartition<TElement> Skip(int count) => new OrderedPartition<TElement>(this, count, int.MaxValue);
+ public override Iterator<TElement> Skip(int count) => new SkipTakeOrderedIterator<TElement>(this, count, int.MaxValue);
- public IPartition<TElement> Take(int count) => new OrderedPartition<TElement>(this, 0, count - 1);
+ public override Iterator<TElement> Take(int count) => new SkipTakeOrderedIterator<TElement>(this, 0, count - 1);
- public TElement? TryGetElementAt(int index, out bool found)
- {
- if (index == 0)
+ public override TElement? TryGetElementAt(int index, out bool found)
{
- return TryGetFirst(out found);
+ if (index == 0)
+ {
+ return TryGetFirst(out found);
+ }
+
+ if (index > 0)
+ {
+ TElement[] buffer = _source.ToArray();
+ if (index < buffer.Length)
+ {
+ found = true;
+ return GetEnumerableSorter().ElementAt(buffer, buffer.Length, index);
+ }
+ }
+
+ found = false;
+ return default;
}
- if (index > 0)
+ public override TElement? TryGetFirst(out bool found)
{
- TElement[] buffer = _source.ToArray();
- if (index < buffer.Length)
+ CachingComparer<TElement> comparer = GetComparer();
+ using (IEnumerator<TElement> e = _source.GetEnumerator())
{
+ if (!e.MoveNext())
+ {
+ found = false;
+ return default;
+ }
+
+ TElement value = e.Current;
+ comparer.SetElement(value);
+ while (e.MoveNext())
+ {
+ TElement x = e.Current;
+ if (comparer.Compare(x, true) < 0)
+ {
+ value = x;
+ }
+ }
+
found = true;
- return GetEnumerableSorter().ElementAt(buffer, buffer.Length, index);
+ return value;
}
}
- found = false;
- return default;
- }
-
- public virtual TElement? TryGetFirst(out bool found)
- {
- CachingComparer<TElement> comparer = GetComparer();
- using (IEnumerator<TElement> e = _source.GetEnumerator())
+ public override TElement? TryGetLast(out bool found)
{
- if (!e.MoveNext())
+ using (IEnumerator<TElement> e = _source.GetEnumerator())
{
- found = false;
- return default;
- }
+ if (!e.MoveNext())
+ {
+ found = false;
+ return default;
+ }
- TElement value = e.Current;
- comparer.SetElement(value);
- while (e.MoveNext())
- {
- TElement x = e.Current;
- if (comparer.Compare(x, true) < 0)
+ CachingComparer<TElement> comparer = GetComparer();
+ TElement value = e.Current;
+ comparer.SetElement(value);
+ while (e.MoveNext())
{
- value = x;
+ TElement current = e.Current;
+ if (comparer.Compare(current, false) >= 0)
+ {
+ value = current;
+ }
}
- }
- found = true;
- return value;
+ found = true;
+ return value;
+ }
}
- }
- public virtual TElement? TryGetLast(out bool found)
- {
- using (IEnumerator<TElement> e = _source.GetEnumerator())
+ public TElement? TryGetLast(int minIdx, int maxIdx, out bool found)
{
- if (!e.MoveNext())
+ TElement[] buffer = _source.ToArray();
+ if (minIdx < buffer.Length)
{
- found = false;
- return default;
+ found = true;
+ return (maxIdx < buffer.Length - 1) ?
+ GetEnumerableSorter().ElementAt(buffer, buffer.Length, maxIdx) :
+ Last(buffer);
}
+ found = false;
+ return default;
+ }
+
+ private TElement Last(TElement[] items)
+ {
CachingComparer<TElement> comparer = GetComparer();
- TElement value = e.Current;
+
+ TElement value = items[0];
comparer.SetElement(value);
- while (e.MoveNext())
+
+ for (int i = 1; i < items.Length; ++i)
{
- TElement current = e.Current;
- if (comparer.Compare(current, false) >= 0)
+ TElement x = items[i];
+ if (comparer.Compare(x, cacheLower: false) >= 0)
{
- value = current;
+ value = x;
}
}
- found = true;
return value;
}
}
- public TElement? TryGetLast(int minIdx, int maxIdx, out bool found)
+ internal sealed partial class OrderedIterator<TElement, TKey> : OrderedIterator<TElement>
{
- TElement[] buffer = _source.ToArray();
- if (minIdx < buffer.Length)
- {
- found = true;
- return (maxIdx < buffer.Length - 1) ?
- GetEnumerableSorter().ElementAt(buffer, buffer.Length, maxIdx) :
- Last(buffer);
- }
+ // For complicated cases, rely on the base implementation that's more comprehensive.
+ // For the simple case of OrderBy(...).First() or OrderByDescending(...).First() (i.e. where
+ // there's just a single comparer we need to factor in), we can just do the iteration directly.
- found = false;
- return default;
- }
-
- private TElement Last(TElement[] items)
- {
- CachingComparer<TElement> comparer = GetComparer();
-
- TElement value = items[0];
- comparer.SetElement(value);
-
- for (int i = 1; i < items.Length; ++i)
+ public override TElement? TryGetFirst(out bool found)
{
- TElement x = items[i];
- if (comparer.Compare(x, cacheLower: false) >= 0)
+ if (_parent is not null)
{
- value = x;
+ return base.TryGetFirst(out found);
}
- }
- return value;
- }
- }
+ using IEnumerator<TElement> e = _source.GetEnumerator();
- internal sealed partial class OrderedEnumerable<TElement, TKey> : OrderedEnumerable<TElement>
- {
- // For complicated cases, rely on the base implementation that's more comprehensive.
- // For the simple case of OrderBy(...).First() or OrderByDescending(...).First() (i.e. where
- // there's just a single comparer we need to factor in), we can just do the iteration directly.
+ if (e.MoveNext())
+ {
+ IComparer<TKey> comparer = _comparer;
+ Func<TElement, TKey> keySelector = _keySelector;
- public override TElement? TryGetFirst(out bool found)
- {
- if (_parent is not null)
- {
- return base.TryGetFirst(out found);
- }
+ TElement resultValue = e.Current;
+ TKey resultKey = keySelector(resultValue);
- using IEnumerator<TElement> e = _source.GetEnumerator();
+ if (_descending)
+ {
+ while (e.MoveNext())
+ {
+ TElement nextValue = e.Current;
+ TKey nextKey = keySelector(nextValue);
+ if (comparer.Compare(nextKey, resultKey) > 0)
+ {
+ resultKey = nextKey;
+ resultValue = nextValue;
+ }
+ }
+ }
+ else
+ {
+ while (e.MoveNext())
+ {
+ TElement nextValue = e.Current;
+ TKey nextKey = keySelector(nextValue);
+ if (comparer.Compare(nextKey, resultKey) < 0)
+ {
+ resultKey = nextKey;
+ resultValue = nextValue;
+ }
+ }
+ }
+
+ found = true;
+ return resultValue;
+ }
+
+ found = false;
+ return default;
+ }
- if (e.MoveNext())
+ public override TElement? TryGetLast(out bool found)
{
- IComparer<TKey> comparer = _comparer;
- Func<TElement, TKey> keySelector = _keySelector;
+ if (_parent is not null)
+ {
+ return base.TryGetLast(out found);
+ }
- TElement resultValue = e.Current;
- TKey resultKey = keySelector(resultValue);
+ using IEnumerator<TElement> e = _source.GetEnumerator();
- if (_descending)
+ if (e.MoveNext())
{
- while (e.MoveNext())
+ IComparer<TKey> comparer = _comparer;
+ Func<TElement, TKey> keySelector = _keySelector;
+
+ TElement resultValue = e.Current;
+ TKey resultKey = keySelector(resultValue);
+
+ if (_descending)
{
- TElement nextValue = e.Current;
- TKey nextKey = keySelector(nextValue);
- if (comparer.Compare(nextKey, resultKey) > 0)
+ while (e.MoveNext())
{
- resultKey = nextKey;
- resultValue = nextValue;
+ TElement nextValue = e.Current;
+ TKey nextKey = keySelector(nextValue);
+ if (comparer.Compare(nextKey, resultKey) <= 0)
+ {
+ resultKey = nextKey;
+ resultValue = nextValue;
+ }
}
}
- }
- else
- {
- while (e.MoveNext())
+ else
{
- TElement nextValue = e.Current;
- TKey nextKey = keySelector(nextValue);
- if (comparer.Compare(nextKey, resultKey) < 0)
+ while (e.MoveNext())
{
- resultKey = nextKey;
- resultValue = nextValue;
+ TElement nextValue = e.Current;
+ TKey nextKey = keySelector(nextValue);
+ if (comparer.Compare(nextKey, resultKey) >= 0)
+ {
+ resultKey = nextKey;
+ resultValue = nextValue;
+ }
}
}
+
+ found = true;
+ return resultValue;
}
- found = true;
- return resultValue;
+ found = false;
+ return default;
}
-
- found = false;
- return default;
}
- public override TElement? TryGetLast(out bool found)
+ internal sealed partial class ImplicitlyStableOrderedIterator<TElement> : OrderedIterator<TElement>
{
- if (_parent is not null)
+ public override TElement[] ToArray()
{
- return base.TryGetLast(out found);
+ TElement[] array = _source.ToArray();
+ Sort(array, _descending);
+ return array;
}
- using IEnumerator<TElement> e = _source.GetEnumerator();
-
- if (e.MoveNext())
+ public override List<TElement> ToList()
{
- IComparer<TKey> comparer = _comparer;
- Func<TElement, TKey> keySelector = _keySelector;
+ List<TElement> list = _source.ToList();
+ Sort(CollectionsMarshal.AsSpan(list), _descending);
+ return list;
+ }
- TElement resultValue = e.Current;
- TKey resultKey = keySelector(resultValue);
+ public override TElement? TryGetFirst(out bool found) =>
+ TryGetFirstOrLast(out found, first: !_descending);
- if (_descending)
+ public override TElement? TryGetLast(out bool found) =>
+ TryGetFirstOrLast(out found, first: _descending);
+
+ private TElement? TryGetFirstOrLast(out bool found, bool first)
+ {
+ if (TryGetSpan(_source, out ReadOnlySpan<TElement> span))
{
- while (e.MoveNext())
+ if (span.Length != 0)
{
- TElement nextValue = e.Current;
- TKey nextKey = keySelector(nextValue);
- if (comparer.Compare(nextKey, resultKey) <= 0)
- {
- resultKey = nextKey;
- resultValue = nextValue;
- }
+ Debug.Assert(TypeIsImplicitlyStable<TElement>(), "Using Min/Max has different semantics for floating-point values.");
+
+ found = true;
+ return first ?
+ Min(_source) :
+ Max(_source);
}
}
else
{
- while (e.MoveNext())
+ using IEnumerator<TElement> e = _source.GetEnumerator();
+
+ if (e.MoveNext())
{
- TElement nextValue = e.Current;
- TKey nextKey = keySelector(nextValue);
- if (comparer.Compare(nextKey, resultKey) >= 0)
+ TElement resultValue = e.Current;
+
+ if (first)
{
- resultKey = nextKey;
- resultValue = nextValue;
+ while (e.MoveNext())
+ {
+ TElement nextValue = e.Current;
+ if (Comparer<TElement>.Default.Compare(nextValue, resultValue) < 0)
+ {
+ resultValue = nextValue;
+ }
+ }
+ }
+ else
+ {
+ while (e.MoveNext())
+ {
+ TElement nextValue = e.Current;
+ if (Comparer<TElement>.Default.Compare(nextValue, resultValue) >= 0)
+ {
+ resultValue = nextValue;
+ }
+ }
}
+
+ found = true;
+ return resultValue;
}
}
- found = true;
- return resultValue;
+ found = false;
+ return default;
}
-
- found = false;
- return default;
}
- }
- internal sealed partial class OrderedImplicitlyStableEnumerable<TElement> : OrderedEnumerable<TElement>
- {
- public override TElement[] ToArray()
+ internal sealed class SkipTakeOrderedIterator<TElement> : Iterator<TElement>
{
- TElement[] array = _source.ToArray();
- Sort(array, _descending);
- return array;
- }
+ private readonly OrderedIterator<TElement> _source;
+ private readonly int _minIndexInclusive;
+ private readonly int _maxIndexInclusive;
- public override List<TElement> ToList()
- {
- List<TElement> list = _source.ToList();
- Sort(CollectionsMarshal.AsSpan(list), _descending);
- return list;
- }
+ private TElement[]? _buffer;
+ private int[]? _map;
+ private int _maxIdx;
- public override TElement? TryGetFirst(out bool found) =>
- TryGetFirstOrLast(out found, first: !_descending);
+ public SkipTakeOrderedIterator(OrderedIterator<TElement> source, int minIdxInclusive, int maxIdxInclusive)
+ {
+ _source = source;
+ _minIndexInclusive = minIdxInclusive;
+ _maxIndexInclusive = maxIdxInclusive;
+ }
- public override TElement? TryGetLast(out bool found) =>
- TryGetFirstOrLast(out found, first: _descending);
+ public override Iterator<TElement> Clone() => new SkipTakeOrderedIterator<TElement>(_source, _minIndexInclusive, _maxIndexInclusive);
- private TElement? TryGetFirstOrLast(out bool found, bool first)
- {
- if (Enumerable.TryGetSpan(_source, out ReadOnlySpan<TElement> span))
+ public override bool MoveNext()
{
- if (span.Length != 0)
+ int state = _state;
+
+ Initialized:
+ if (state > 1)
{
- Debug.Assert(Enumerable.TypeIsImplicitlyStable<TElement>(), "Using Min/Max has different semantics for floating-point values.");
+ Debug.Assert(_buffer is not null);
+ Debug.Assert(_map is not null);
- found = true;
- return first ?
- Enumerable.Min(_source) :
- Enumerable.Max(_source);
+ int[] map = _map;
+ int i = state - 2 + _minIndexInclusive;
+ if (i <= _maxIdx)
+ {
+ _current = _buffer[map[i]];
+ _state++;
+ return true;
+ }
}
- }
- else
- {
- using IEnumerator<TElement> e = _source.GetEnumerator();
-
- if (e.MoveNext())
+ else if (state == 1)
{
- TElement resultValue = e.Current;
-
- if (first)
+ TElement[] buffer = _source.ToArray();
+ int count = buffer.Length;
+ if (count > _minIndexInclusive)
{
- while (e.MoveNext())
+ _maxIdx = _maxIndexInclusive;
+ if (count <= _maxIdx)
{
- TElement nextValue = e.Current;
- if (Comparer<TElement>.Default.Compare(nextValue, resultValue) < 0)
- {
- resultValue = nextValue;
- }
+ _maxIdx = count - 1;
}
- }
- else
- {
- while (e.MoveNext())
+
+ if (_minIndexInclusive == _maxIdx)
{
- TElement nextValue = e.Current;
- if (Comparer<TElement>.Default.Compare(nextValue, resultValue) >= 0)
- {
- resultValue = nextValue;
- }
+ _current = _source.GetEnumerableSorter().ElementAt(buffer, count, _minIndexInclusive);
+ _state = -1;
+ return true;
}
+
+ _map = _source.SortedMap(buffer, _minIndexInclusive, _maxIdx);
+ _buffer = buffer;
+ _state = state = 2;
+ goto Initialized;
}
+ }
- found = true;
- return resultValue;
+ Dispose();
+ return false;
+ }
+
+ public override Iterator<TElement>? Skip(int count)
+ {
+ int minIndex = _minIndexInclusive + count;
+ return (uint)minIndex > (uint)_maxIndexInclusive ? null : new SkipTakeOrderedIterator<TElement>(_source, minIndex, _maxIndexInclusive);
+ }
+
+ public override Iterator<TElement> Take(int count)
+ {
+ int maxIndex = _minIndexInclusive + count - 1;
+ if ((uint)maxIndex >= (uint)_maxIndexInclusive)
+ {
+ return this;
}
+
+ return new SkipTakeOrderedIterator<TElement>(_source, _minIndexInclusive, maxIndex);
}
- found = false;
- return default;
+ public override TElement? TryGetElementAt(int index, out bool found)
+ {
+ if ((uint)index <= (uint)(_maxIndexInclusive - _minIndexInclusive))
+ {
+ return _source.TryGetElementAt(index + _minIndexInclusive, out found);
+ }
+
+ found = false;
+ return default;
+ }
+
+ public override TElement? TryGetFirst(out bool found) => _source.TryGetElementAt(_minIndexInclusive, out found);
+
+ public override TElement? TryGetLast(out bool found) =>
+ _source.TryGetLast(_minIndexInclusive, _maxIndexInclusive, out found);
+
+ public override TElement[] ToArray() => _source.ToArray(_minIndexInclusive, _maxIndexInclusive);
+
+ public override List<TElement> ToList() => _source.ToList(_minIndexInclusive, _maxIndexInclusive);
+
+ public override int GetCount(bool onlyIfCheap) => _source.GetCount(_minIndexInclusive, _maxIndexInclusive, onlyIfCheap);
}
}
}
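
A usage-level sketch of what SkipTakeOrderedIterator buys (a behavioral inference from the code above, not an authoritative account): only the requested slice of the sort order is materialized, and First() on an ordered sequence degenerates to a single scan.

using System;
using System.Linq;

class SkipTakeOrderedDemo
{
    static void Main()
    {
        int[] data = { 9, 4, 7, 1, 8, 3, 6, 2, 5 };

        // Presumably answered by SkipTakeOrderedIterator: PartialQuickSort orders
        // only sorted-order indices 2..4 instead of the whole buffer.
        var slice = data.OrderBy(x => x).Skip(2).Take(3);
        Console.WriteLine(string.Join(", ", slice)); // 3, 4, 5

        // TryGetFirst turns this into one O(n) minimum scan with no sort at all.
        Console.WriteLine(data.OrderBy(x => x).First()); // 1
    }
}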
diff --git a/src/libraries/System.Linq/src/System/Linq/OrderedEnumerable.cs b/src/libraries/System.Linq/src/System/Linq/OrderedEnumerable.cs
index 6b6b83bac93f..40ee075a9b31 100644
--- a/src/libraries/System.Linq/src/System/Linq/OrderedEnumerable.cs
+++ b/src/libraries/System.Linq/src/System/Linq/OrderedEnumerable.cs
@@ -7,618 +7,657 @@ using System.Diagnostics;
namespace System.Linq
{
- internal abstract partial class OrderedEnumerable<TElement> : IOrderedEnumerable<TElement>
+ public static partial class Enumerable
{
- internal IEnumerable<TElement> _source;
+ internal abstract partial class OrderedIterator<TElement> : Iterator<TElement>, IOrderedEnumerable<TElement>
+ {
+ internal readonly IEnumerable<TElement> _source;
- protected OrderedEnumerable(IEnumerable<TElement> source) => _source = source;
+ protected OrderedIterator(IEnumerable<TElement> source) => _source = source;
- private int[] SortedMap(TElement[] buffer) => GetEnumerableSorter().Sort(buffer, buffer.Length);
+ private protected int[] SortedMap(TElement[] buffer) => GetEnumerableSorter().Sort(buffer, buffer.Length);
- private int[] SortedMap(TElement[] buffer, int minIdx, int maxIdx) =>
- GetEnumerableSorter().Sort(buffer, buffer.Length, minIdx, maxIdx);
+ internal int[] SortedMap(TElement[] buffer, int minIdx, int maxIdx) =>
+ GetEnumerableSorter().Sort(buffer, buffer.Length, minIdx, maxIdx);
- public virtual IEnumerator<TElement> GetEnumerator()
- {
- TElement[] buffer = _source.ToArray();
- if (buffer.Length > 0)
+ internal abstract EnumerableSorter<TElement> GetEnumerableSorter(EnumerableSorter<TElement>? next = null);
+
+ internal abstract CachingComparer<TElement> GetComparer(CachingComparer<TElement>? childComparer = null);
+
+ IEnumerator IEnumerable.GetEnumerator() => GetEnumerator();
+
+ IOrderedEnumerable<TElement> IOrderedEnumerable<TElement>.CreateOrderedEnumerable<TKey>(Func<TElement, TKey> keySelector, IComparer<TKey>? comparer, bool descending) =>
+ new OrderedIterator<TElement, TKey>(_source, keySelector, comparer, @descending, this);
+
+ public TElement? TryGetLast(Func<TElement, bool> predicate, out bool found)
{
- int[] map = SortedMap(buffer);
- for (int i = 0; i < buffer.Length; i++)
+ CachingComparer<TElement> comparer = GetComparer();
+ using (IEnumerator<TElement> e = _source.GetEnumerator())
{
- yield return buffer[map[i]];
+ TElement value;
+ do
+ {
+ if (!e.MoveNext())
+ {
+ found = false;
+ return default;
+ }
+
+ value = e.Current;
+ }
+ while (!predicate(value));
+
+ comparer.SetElement(value);
+ while (e.MoveNext())
+ {
+ TElement x = e.Current;
+ if (predicate(x) && comparer.Compare(x, false) >= 0)
+ {
+ value = x;
+ }
+ }
+
+ found = true;
+ return value;
}
}
}
- internal IEnumerator<TElement> GetEnumerator(int minIdx, int maxIdx)
+ internal sealed partial class OrderedIterator<TElement, TKey> : OrderedIterator<TElement>
{
- TElement[] buffer = _source.ToArray();
- int count = buffer.Length;
- if (count > minIdx)
+ private readonly OrderedIterator<TElement>? _parent;
+ private readonly Func<TElement, TKey> _keySelector;
+ private readonly IComparer<TKey> _comparer;
+ private readonly bool _descending;
+ private TElement[]? _buffer;
+ private int[]? _map;
+
+ internal OrderedIterator(IEnumerable<TElement> source, Func<TElement, TKey> keySelector, IComparer<TKey>? comparer, bool descending, OrderedIterator<TElement>? parent) :
+ base(source)
{
- if (count <= maxIdx)
+ if (source is null)
{
- maxIdx = count - 1;
+ ThrowHelper.ThrowArgumentNullException(ExceptionArgument.source);
}
-
- if (minIdx == maxIdx)
+ if (keySelector is null)
{
- yield return GetEnumerableSorter().ElementAt(buffer, count, minIdx);
- }
- else
- {
- int[] map = SortedMap(buffer, minIdx, maxIdx);
- while (minIdx <= maxIdx)
- {
- yield return buffer[map[minIdx]];
- ++minIdx;
- }
+ ThrowHelper.ThrowArgumentNullException(ExceptionArgument.keySelector);
}
+
+ _parent = parent;
+ _keySelector = keySelector;
+ _comparer = comparer ?? Comparer<TKey>.Default;
+ _descending = descending;
}
- }
- private EnumerableSorter<TElement> GetEnumerableSorter() => GetEnumerableSorter(null);
+ public override Iterator<TElement> Clone() => new OrderedIterator<TElement, TKey>(_source, _keySelector, _comparer, _descending, _parent);
- internal abstract EnumerableSorter<TElement> GetEnumerableSorter(EnumerableSorter<TElement>? next);
+ internal override EnumerableSorter<TElement> GetEnumerableSorter(EnumerableSorter<TElement>? next)
+ {
+ // Special-case the common use of string with the default comparer. Comparer<string>.Default
+ // checks the thread's culture on each call; that overhead is unnecessary here because the
+ // sort stays on the current thread (and EnumerableSorter is not used afterwards).
+ IComparer<TKey> comparer = _comparer;
+ if (typeof(TKey) == typeof(string) && comparer == Comparer<string>.Default)
+ {
+ comparer = (IComparer<TKey>)StringComparer.CurrentCulture;
+ }
- internal abstract CachingComparer<TElement> GetComparer(CachingComparer<TElement>? childComparer = null);
+ EnumerableSorter<TElement> sorter = new EnumerableSorter<TElement, TKey>(_keySelector, comparer, _descending, next);
+ if (_parent != null)
+ {
+ sorter = _parent.GetEnumerableSorter(sorter);
+ }
- IEnumerator IEnumerable.GetEnumerator() => GetEnumerator();
+ return sorter;
+ }
- IOrderedEnumerable<TElement> IOrderedEnumerable<TElement>.CreateOrderedEnumerable<TKey>(Func<TElement, TKey> keySelector, IComparer<TKey>? comparer, bool descending) =>
- new OrderedEnumerable<TElement, TKey>(_source, keySelector, comparer, @descending, this);
+ internal override CachingComparer<TElement> GetComparer(CachingComparer<TElement>? childComparer)
+ {
+ CachingComparer<TElement> cmp = childComparer == null
+ ? new CachingComparer<TElement, TKey>(_keySelector, _comparer, _descending)
+ : new CachingComparerWithChild<TElement, TKey>(_keySelector, _comparer, _descending, childComparer);
+ return _parent != null ? _parent.GetComparer(cmp) : cmp;
+ }
- public TElement? TryGetLast(Func<TElement, bool> predicate, out bool found)
- {
- CachingComparer<TElement> comparer = GetComparer();
- using (IEnumerator<TElement> e = _source.GetEnumerator())
+ public override bool MoveNext()
{
- TElement value;
- do
+ int state = _state;
+
+ Initialized:
+ if (state > 1)
{
- if (!e.MoveNext())
+ Debug.Assert(_buffer is not null);
+ Debug.Assert(_map is not null);
+ Debug.Assert(_map.Length == _buffer.Length);
+
+ int[] map = _map;
+ int i = state - 2;
+ if ((uint)i < (uint)map.Length)
{
- found = false;
- return default;
+ _current = _buffer[map[i]];
+ _state++;
+ return true;
}
-
- value = e.Current;
}
- while (!predicate(value));
-
- comparer.SetElement(value);
- while (e.MoveNext())
+ else if (state == 1)
{
- TElement x = e.Current;
- if (predicate(x) && comparer.Compare(x, false) >= 0)
+ TElement[] buffer = _source.ToArray();
+ if (buffer.Length != 0)
{
- value = x;
+ _map = SortedMap(buffer);
+ _buffer = buffer;
+ _state = state = 2;
+ goto Initialized;
}
}
- found = true;
- return value;
+ Dispose();
+ return false;
}
- }
- }
- internal sealed partial class OrderedEnumerable<TElement, TKey> : OrderedEnumerable<TElement>
- {
- private readonly OrderedEnumerable<TElement>? _parent;
- private readonly Func<TElement, TKey> _keySelector;
- private readonly IComparer<TKey> _comparer;
- private readonly bool _descending;
-
- internal OrderedEnumerable(IEnumerable<TElement> source, Func<TElement, TKey> keySelector, IComparer<TKey>? comparer, bool descending, OrderedEnumerable<TElement>? parent) :
- base(source)
- {
- if (source is null)
+ public override void Dispose()
{
- ThrowHelper.ThrowArgumentNullException(ExceptionArgument.source);
+ _buffer = null;
+ _map = null;
+ base.Dispose();
}
- if (keySelector is null)
- {
- ThrowHelper.ThrowArgumentNullException(ExceptionArgument.keySelector);
- }
-
- _parent = parent;
- _keySelector = keySelector;
- _comparer = comparer ?? Comparer<TKey>.Default;
- _descending = descending;
}
- internal override EnumerableSorter<TElement> GetEnumerableSorter(EnumerableSorter<TElement>? next)
+ /// <summary>An ordered enumerable used by Order/OrderDescending for element types whose equal values are bitwise indistinguishable, so an unstable sort is observably stable.</summary>
+ internal sealed partial class ImplicitlyStableOrderedIterator<TElement> : OrderedIterator<TElement>
{
- // Special case the common use of string with default comparer. Comparer<string>.Default checks the
- // thread's Culture on each call which is an overhead which is not required, because we are about to
- // do a sort which remains on the current thread (and EnumerableSorter is not used afterwards).
- IComparer<TKey> comparer = _comparer;
- if (typeof(TKey) == typeof(string) && comparer == Comparer<string>.Default)
- {
- comparer = (IComparer<TKey>)StringComparer.CurrentCulture;
- }
+ private readonly bool _descending;
+ private TElement[]? _buffer;
- EnumerableSorter<TElement> sorter = new EnumerableSorter<TElement, TKey>(_keySelector, comparer, _descending, next);
- if (_parent != null)
+ public ImplicitlyStableOrderedIterator(IEnumerable<TElement> source, bool descending) : base(source)
{
- sorter = _parent.GetEnumerableSorter(sorter);
- }
-
- return sorter;
- }
-
- internal override CachingComparer<TElement> GetComparer(CachingComparer<TElement>? childComparer)
- {
- CachingComparer<TElement> cmp = childComparer == null
- ? new CachingComparer<TElement, TKey>(_keySelector, _comparer, _descending)
- : new CachingComparerWithChild<TElement, TKey>(_keySelector, _comparer, _descending, childComparer);
- return _parent != null ? _parent.GetComparer(cmp) : cmp;
- }
- }
-
- /// <summary>An ordered enumerable used by Order/OrderDescending for Ts that are bitwise indistinguishable for any considered equal.</summary>
- internal sealed partial class OrderedImplicitlyStableEnumerable<TElement> : OrderedEnumerable<TElement>
- {
- private readonly bool _descending;
+ Debug.Assert(TypeIsImplicitlyStable<TElement>());
- public OrderedImplicitlyStableEnumerable(IEnumerable<TElement> source, bool descending) : base(source)
- {
- Debug.Assert(Enumerable.TypeIsImplicitlyStable<TElement>());
+ if (source is null)
+ {
+ ThrowHelper.ThrowArgumentNullException(ExceptionArgument.source);
+ }
- if (source is null)
- {
- ThrowHelper.ThrowArgumentNullException(ExceptionArgument.source);
+ _descending = descending;
}
- _descending = descending;
- }
+ public override Iterator<TElement> Clone() => new ImplicitlyStableOrderedIterator<TElement>(_source, _descending);
- internal override CachingComparer<TElement> GetComparer(CachingComparer<TElement>? childComparer) =>
- childComparer == null ?
- new CachingComparer<TElement, TElement>(EnumerableSorter<TElement>.IdentityFunc, Comparer<TElement>.Default, _descending) :
- new CachingComparerWithChild<TElement, TElement>(EnumerableSorter<TElement>.IdentityFunc, Comparer<TElement>.Default, _descending, childComparer);
+ internal override CachingComparer<TElement> GetComparer(CachingComparer<TElement>? childComparer) =>
+ childComparer == null ?
+ new CachingComparer<TElement, TElement>(EnumerableSorter<TElement>.IdentityFunc, Comparer<TElement>.Default, _descending) :
+ new CachingComparerWithChild<TElement, TElement>(EnumerableSorter<TElement>.IdentityFunc, Comparer<TElement>.Default, _descending, childComparer);
- internal override EnumerableSorter<TElement> GetEnumerableSorter(EnumerableSorter<TElement>? next) =>
- new EnumerableSorter<TElement, TElement>(EnumerableSorter<TElement>.IdentityFunc, Comparer<TElement>.Default, _descending, next);
+ internal override EnumerableSorter<TElement> GetEnumerableSorter(EnumerableSorter<TElement>? next) =>
+ new EnumerableSorter<TElement, TElement>(EnumerableSorter<TElement>.IdentityFunc, Comparer<TElement>.Default, _descending, next);
- public override IEnumerator<TElement> GetEnumerator()
- {
- TElement[] buffer = _source.ToArray();
- if (buffer.Length > 0)
+ public override bool MoveNext()
{
- Sort(buffer, _descending);
- for (int i = 0; i < buffer.Length; i++)
+ int state = _state;
+ TElement[]? buffer;
+
+ Initialized:
+ if (state > 1)
{
- yield return buffer[i];
+ buffer = _buffer;
+ Debug.Assert(buffer is not null);
+
+ int i = state - 2;
+ if ((uint)i < (uint)buffer.Length)
+ {
+ _current = buffer[i];
+ _state++;
+ return true;
+ }
+ }
+ else if (state == 1)
+ {
+ buffer = _source.ToArray();
+ if (buffer.Length != 0)
+ {
+ Sort(buffer, _descending);
+ _buffer = buffer;
+ _state = state = 2;
+ goto Initialized;
+ }
}
- }
- }
- private static void Sort(Span<TElement> span, bool descending)
- {
- if (descending)
- {
- span.Sort(static (a, b) => Comparer<TElement>.Default.Compare(b, a));
+ Dispose();
+ return false;
}
- else
+
+ public override void Dispose()
{
- span.Sort();
+ _buffer = null;
+ base.Dispose();
}
- }
- }
-
- // A comparer that chains comparisons, and pushes through the last element found to be
- // lower or higher (depending on use), so as to represent the sort of comparisons
- // done by OrderBy().ThenBy() combinations.
- internal abstract class CachingComparer<TElement>
- {
- internal abstract int Compare(TElement element, bool cacheLower);
-
- internal abstract void SetElement(TElement element);
- }
-
- internal class CachingComparer<TElement, TKey> : CachingComparer<TElement>
- {
- protected readonly Func<TElement, TKey> _keySelector;
- protected readonly IComparer<TKey> _comparer;
- protected readonly bool _descending;
- protected TKey? _lastKey;
-
- public CachingComparer(Func<TElement, TKey> keySelector, IComparer<TKey> comparer, bool descending)
- {
- _keySelector = keySelector;
- _comparer = comparer;
- _descending = descending;
- }
- internal override int Compare(TElement element, bool cacheLower)
- {
- TKey newKey = _keySelector(element);
- int cmp = _descending ? _comparer.Compare(_lastKey, newKey) : _comparer.Compare(newKey, _lastKey);
- if (cacheLower == cmp < 0)
+ private static void Sort(Span<TElement> span, bool descending)
{
- _lastKey = newKey;
+ if (descending)
+ {
+ span.Sort(static (a, b) => Comparer<TElement>.Default.Compare(b, a));
+ }
+ else
+ {
+ span.Sort();
+ }
}
-
- return cmp;
}
- internal override void SetElement(TElement element)
+ // A comparer that chains comparisons, and pushes through the last element found to be
+ // lower or higher (depending on use), so as to represent the sort of comparisons
+ // done by OrderBy().ThenBy() combinations.
+ internal abstract class CachingComparer<TElement>
{
- _lastKey = _keySelector(element);
- }
- }
-
- internal sealed class CachingComparerWithChild<TElement, TKey> : CachingComparer<TElement, TKey>
- {
- private readonly CachingComparer<TElement> _child;
+ internal abstract int Compare(TElement element, bool cacheLower);
- public CachingComparerWithChild(Func<TElement, TKey> keySelector, IComparer<TKey> comparer, bool descending, CachingComparer<TElement> child)
- : base(keySelector, comparer, descending)
- {
- _child = child;
+ internal abstract void SetElement(TElement element);
}
- internal override int Compare(TElement element, bool cacheLower)
+ internal class CachingComparer<TElement, TKey> : CachingComparer<TElement>
{
- TKey newKey = _keySelector(element);
- int cmp = _descending ? _comparer.Compare(_lastKey, newKey) : _comparer.Compare(newKey, _lastKey);
- if (cmp == 0)
+ protected readonly Func<TElement, TKey> _keySelector;
+ protected readonly IComparer<TKey> _comparer;
+ protected readonly bool _descending;
+ protected TKey? _lastKey;
+
+ public CachingComparer(Func<TElement, TKey> keySelector, IComparer<TKey> comparer, bool descending)
{
- return _child.Compare(element, cacheLower);
+ _keySelector = keySelector;
+ _comparer = comparer;
+ _descending = descending;
}
- if (cacheLower == cmp < 0)
+ internal override int Compare(TElement element, bool cacheLower)
{
- _lastKey = newKey;
- _child.SetElement(element);
+ TKey newKey = _keySelector(element);
+ int cmp = _descending ? _comparer.Compare(_lastKey, newKey) : _comparer.Compare(newKey, _lastKey);
+ if (cacheLower == cmp < 0)
+ {
+ _lastKey = newKey;
+ }
+
+ return cmp;
}
- return cmp;
+ internal override void SetElement(TElement element)
+ {
+ _lastKey = _keySelector(element);
+ }
}
- internal override void SetElement(TElement element)
+ internal sealed class CachingComparerWithChild<TElement, TKey> : CachingComparer<TElement, TKey>
{
- base.SetElement(element);
- _child.SetElement(element);
- }
- }
-
- internal abstract class EnumerableSorter<TElement>
- {
- /// <summary>Function that returns its input unmodified.</summary>
- /// <remarks>
- /// Used for reference equality in order to avoid unnecessary computation when a caller
- /// can benefit from knowing that the produced value is identical to the input.
- /// </remarks>
- internal static readonly Func<TElement, TElement> IdentityFunc = e => e;
-
- internal abstract void ComputeKeys(TElement[] elements, int count);
-
- internal abstract int CompareAnyKeys(int index1, int index2);
+ private readonly CachingComparer<TElement> _child;
- private int[] ComputeMap(TElement[] elements, int count)
- {
- ComputeKeys(elements, count);
- int[] map = new int[count];
- for (int i = 0; i < map.Length; i++)
+ public CachingComparerWithChild(Func<TElement, TKey> keySelector, IComparer<TKey> comparer, bool descending, CachingComparer<TElement> child)
+ : base(keySelector, comparer, descending)
{
- map[i] = i;
+ _child = child;
}
- return map;
- }
+ internal override int Compare(TElement element, bool cacheLower)
+ {
+ TKey newKey = _keySelector(element);
+ int cmp = _descending ? _comparer.Compare(_lastKey, newKey) : _comparer.Compare(newKey, _lastKey);
+ if (cmp == 0)
+ {
+ return _child.Compare(element, cacheLower);
+ }
- internal int[] Sort(TElement[] elements, int count)
- {
- int[] map = ComputeMap(elements, count);
- QuickSort(map, 0, count - 1);
- return map;
- }
+ if (cacheLower == cmp < 0)
+ {
+ _lastKey = newKey;
+ _child.SetElement(element);
+ }
- internal int[] Sort(TElement[] elements, int count, int minIdx, int maxIdx)
- {
- int[] map = ComputeMap(elements, count);
- PartialQuickSort(map, 0, count - 1, minIdx, maxIdx);
- return map;
- }
+ return cmp;
+ }
- internal TElement ElementAt(TElement[] elements, int count, int idx)
- {
- int[] map = ComputeMap(elements, count);
- return idx == 0 ?
- elements[Min(map, count)] :
- elements[QuickSelect(map, count - 1, idx)];
+ internal override void SetElement(TElement element)
+ {
+ base.SetElement(element);
+ _child.SetElement(element);
+ }
}
- protected abstract void QuickSort(int[] map, int left, int right);
-
- // Sorts the k elements between minIdx and maxIdx without sorting all elements
- // Time complexity: O(n + k log k) best and average case. O(n^2) worse case.
- protected abstract void PartialQuickSort(int[] map, int left, int right, int minIdx, int maxIdx);
+ internal abstract class EnumerableSorter<TElement>
+ {
+ /// <summary>Function that returns its input unmodified.</summary>
+ /// <remarks>
+ /// Used for reference equality in order to avoid unnecessary computation when a caller
+ /// can benefit from knowing that the produced value is identical to the input.
+ /// </remarks>
+ internal static readonly Func<TElement, TElement> IdentityFunc = e => e;
- // Finds the element that would be at idx if the collection was sorted.
- // Time complexity: O(n) best and average case. O(n^2) worse case.
- protected abstract int QuickSelect(int[] map, int right, int idx);
+ internal abstract void ComputeKeys(TElement[] elements, int count);
- protected abstract int Min(int[] map, int count);
- }
+ internal abstract int CompareAnyKeys(int index1, int index2);
- internal sealed class EnumerableSorter<TElement, TKey> : EnumerableSorter<TElement>
- {
- private readonly Func<TElement, TKey> _keySelector;
- private readonly IComparer<TKey> _comparer;
- private readonly bool _descending;
- private readonly EnumerableSorter<TElement>? _next;
- private TKey[]? _keys;
-
- internal EnumerableSorter(Func<TElement, TKey> keySelector, IComparer<TKey> comparer, bool descending, EnumerableSorter<TElement>? next)
- {
- _keySelector = keySelector;
- _comparer = comparer;
- _descending = descending;
- _next = next;
- }
-
- internal override void ComputeKeys(TElement[] elements, int count)
- {
- Func<TElement, TKey> keySelector = _keySelector;
- if (!ReferenceEquals(keySelector, IdentityFunc))
+ private int[] ComputeMap(TElement[] elements, int count)
{
- var keys = new TKey[count];
- for (int i = 0; i < keys.Length; i++)
+ ComputeKeys(elements, count);
+ int[] map = new int[count];
+ for (int i = 0; i < map.Length; i++)
{
- keys[i] = keySelector(elements[i]);
+ map[i] = i;
}
- _keys = keys;
+
+ return map;
}
- else
+
+ internal int[] Sort(TElement[] elements, int count)
{
- // The key selector is our known identity function, which means we don't
- // need to invoke the key selector for every element. Further, we can just
- // use the original array as the keys (even if count is smaller, as the additional
- // values will just be ignored).
- Debug.Assert(typeof(TKey) == typeof(TElement));
- _keys = (TKey[])(object)elements;
+ int[] map = ComputeMap(elements, count);
+ QuickSort(map, 0, count - 1);
+ return map;
}
- _next?.ComputeKeys(elements, count);
- }
-
- internal override int CompareAnyKeys(int index1, int index2)
- {
- TKey[]? keys = _keys;
- Debug.Assert(keys != null);
-
- int c = _comparer.Compare(keys[index1], keys[index2]);
- if (c == 0)
+ internal int[] Sort(TElement[] elements, int count, int minIdx, int maxIdx)
{
- if (_next == null)
- {
- return index1 - index2; // ensure stability of sort
- }
+ int[] map = ComputeMap(elements, count);
+ PartialQuickSort(map, 0, count - 1, minIdx, maxIdx);
+ return map;
+ }
- return _next.CompareAnyKeys(index1, index2);
+ internal TElement ElementAt(TElement[] elements, int count, int idx)
+ {
+ int[] map = ComputeMap(elements, count);
+ return idx == 0 ?
+ elements[Min(map, count)] :
+ elements[QuickSelect(map, count - 1, idx)];
}
- // -c will result in a negative value for int.MinValue (-int.MinValue == int.MinValue).
- // Flipping keys earlier is more likely to trigger something strange in a comparer,
- // particularly as it comes to the sort being stable.
- return (_descending != (c > 0)) ? 1 : -1;
- }
+ protected abstract void QuickSort(int[] map, int left, int right);
- private int CompareAnyKeys_DefaultComparer_NoNext_Ascending(int index1, int index2)
- {
- Debug.Assert(typeof(TKey).IsValueType);
- Debug.Assert(_comparer == Comparer<TKey>.Default);
- Debug.Assert(_next is null);
- Debug.Assert(!_descending);
-
- TKey[]? keys = _keys;
- Debug.Assert(keys != null);
-
- int c = Comparer<TKey>.Default.Compare(keys[index1], keys[index2]);
- return
- c == 0 ? index1 - index2 : // ensure stability of sort
- c;
- }
+ // Sorts the k elements between minIdx and maxIdx without sorting all elements.
+ // Time complexity: O(n + k log k) best and average case; O(n^2) worst case.
+ protected abstract void PartialQuickSort(int[] map, int left, int right, int minIdx, int maxIdx);
- private int CompareAnyKeys_DefaultComparer_NoNext_Descending(int index1, int index2)
- {
- Debug.Assert(typeof(TKey).IsValueType);
- Debug.Assert(_comparer == Comparer<TKey>.Default);
- Debug.Assert(_next is null);
- Debug.Assert(_descending);
-
- TKey[]? keys = _keys;
- Debug.Assert(keys != null);
-
- int c = Comparer<TKey>.Default.Compare(keys[index2], keys[index1]);
- return
- c == 0 ? index1 - index2 : // ensure stability of sort
- c;
- }
+ // Finds the element that would be at idx if the collection was sorted.
+ // Time complexity: O(n) best and average case; O(n^2) worst case.
+ protected abstract int QuickSelect(int[] map, int right, int idx);
- private int CompareKeys(int index1, int index2) => index1 == index2 ? 0 : CompareAnyKeys(index1, index2);
+ protected abstract int Min(int[] map, int count);
+ }
- protected override void QuickSort(int[] keys, int lo, int hi)
+ internal sealed class EnumerableSorter<TElement, TKey> : EnumerableSorter<TElement>
{
- Comparison<int> comparison;
+ private readonly Func<TElement, TKey> _keySelector;
+ private readonly IComparer<TKey> _comparer;
+ private readonly bool _descending;
+ private readonly EnumerableSorter<TElement>? _next;
+ private TKey[]? _keys;
+
+ internal EnumerableSorter(Func<TElement, TKey> keySelector, IComparer<TKey> comparer, bool descending, EnumerableSorter<TElement>? next)
+ {
+ _keySelector = keySelector;
+ _comparer = comparer;
+ _descending = descending;
+ _next = next;
+ }
- if (typeof(TKey).IsValueType && _next is null && _comparer == Comparer<TKey>.Default)
+ internal override void ComputeKeys(TElement[] elements, int count)
{
- // We can use Comparer<TKey>.Default.Compare and benefit from devirtualization and inlining.
- // We can also avoid extra steps to check whether we need to deal with a subsequent tie breaker (_next).
- if (!_descending)
+ Func<TElement, TKey> keySelector = _keySelector;
+ if (!ReferenceEquals(keySelector, IdentityFunc))
{
- comparison = CompareAnyKeys_DefaultComparer_NoNext_Ascending;
+ var keys = new TKey[count];
+ for (int i = 0; i < keys.Length; i++)
+ {
+ keys[i] = keySelector(elements[i]);
+ }
+ _keys = keys;
}
else
{
- comparison = CompareAnyKeys_DefaultComparer_NoNext_Descending;
+ // The key selector is our known identity function, which means we don't
+ // need to invoke the key selector for every element. Further, we can just
+ // use the original array as the keys (even if count is smaller, as the additional
+ // values will just be ignored).
+ Debug.Assert(typeof(TKey) == typeof(TElement));
+ _keys = (TKey[])(object)elements;
}
- }
- else
- {
- comparison = CompareAnyKeys;
- }
- new Span<int>(keys, lo, hi - lo + 1).Sort(comparison);
- }
+ _next?.ComputeKeys(elements, count);
+ }
- // Sorts the k elements between minIdx and maxIdx without sorting all elements
- // Time complexity: O(n + k log k) best and average case. O(n^2) worse case.
- protected override void PartialQuickSort(int[] map, int left, int right, int minIdx, int maxIdx)
- {
- do
+ internal override int CompareAnyKeys(int index1, int index2)
{
- int i = left;
- int j = right;
- int x = map[i + ((j - i) >> 1)];
- do
+ TKey[]? keys = _keys;
+ Debug.Assert(keys != null);
+
+ int c = _comparer.Compare(keys[index1], keys[index2]);
+ if (c == 0)
{
- while (i < map.Length && CompareKeys(x, map[i]) > 0)
+ if (_next == null)
{
- i++;
+ return index1 - index2; // ensure stability of sort
}
- while (j >= 0 && CompareKeys(x, map[j]) < 0)
- {
- j--;
- }
+ return _next.CompareAnyKeys(index1, index2);
+ }
- if (i > j)
- {
- break;
- }
+ // -c will result in a negative value for int.MinValue (-int.MinValue == int.MinValue).
+ // Flipping keys earlier is more likely to trigger something strange in a comparer,
+ // particularly with respect to the sort being stable.
+ return (_descending != (c > 0)) ? 1 : -1;
+ }
- if (i < j)
- {
- int temp = map[i];
- map[i] = map[j];
- map[j] = temp;
- }
+ private int CompareAnyKeys_DefaultComparer_NoNext_Ascending(int index1, int index2)
+ {
+ Debug.Assert(typeof(TKey).IsValueType);
+ Debug.Assert(_comparer == Comparer<TKey>.Default);
+ Debug.Assert(_next is null);
+ Debug.Assert(!_descending);
+
+ TKey[]? keys = _keys;
+ Debug.Assert(keys != null);
+
+ int c = Comparer<TKey>.Default.Compare(keys[index1], keys[index2]);
+ return
+ c == 0 ? index1 - index2 : // ensure stability of sort
+ c;
+ }
- i++;
- j--;
- }
- while (i <= j);
+ private int CompareAnyKeys_DefaultComparer_NoNext_Descending(int index1, int index2)
+ {
+ Debug.Assert(typeof(TKey).IsValueType);
+ Debug.Assert(_comparer == Comparer<TKey>.Default);
+ Debug.Assert(_next is null);
+ Debug.Assert(_descending);
+
+ TKey[]? keys = _keys;
+ Debug.Assert(keys != null);
+
+ int c = Comparer<TKey>.Default.Compare(keys[index2], keys[index1]);
+ return
+ c == 0 ? index1 - index2 : // ensure stability of sort
+ c;
+ }
- if (minIdx >= i)
- {
- left = i + 1;
- }
- else if (maxIdx <= j)
- {
- right = j - 1;
- }
+ private int CompareKeys(int index1, int index2) => index1 == index2 ? 0 : CompareAnyKeys(index1, index2);
- if (j - left <= right - i)
+ protected override void QuickSort(int[] keys, int lo, int hi)
+ {
+ Comparison<int> comparison;
+
+ if (typeof(TKey).IsValueType && _next is null && _comparer == Comparer<TKey>.Default)
{
- if (left < j)
+ // We can use Comparer<TKey>.Default.Compare and benefit from devirtualization and inlining.
+ // We can also avoid extra steps to check whether we need to deal with a subsequent tie breaker (_next).
+ if (!_descending)
{
- PartialQuickSort(map, left, j, minIdx, maxIdx);
+ comparison = CompareAnyKeys_DefaultComparer_NoNext_Ascending;
+ }
+ else
+ {
+ comparison = CompareAnyKeys_DefaultComparer_NoNext_Descending;
}
-
- left = i;
}
else
{
- if (i < right)
- {
- PartialQuickSort(map, i, right, minIdx, maxIdx);
- }
-
- right = j;
+ comparison = CompareAnyKeys;
}
+
+ new Span<int>(keys, lo, hi - lo + 1).Sort(comparison);
}
- while (left < right);
- }
- // Finds the element that would be at idx if the collection was sorted.
- // Time complexity: O(n) best and average case. O(n^2) worse case.
- protected override int QuickSelect(int[] map, int right, int idx)
- {
- int left = 0;
- do
+ // Sorts the k elements between minIdx and maxIdx without sorting all elements.
+ // Time complexity: O(n + k log k) best and average case. O(n^2) worst case.
+ protected override void PartialQuickSort(int[] map, int left, int right, int minIdx, int maxIdx)
{
- int i = left;
- int j = right;
- int x = map[i + ((j - i) >> 1)];
do
{
- while (i < map.Length && CompareKeys(x, map[i]) > 0)
+ int i = left;
+ int j = right;
+ int x = map[i + ((j - i) >> 1)];
+ do
{
+ while (i < map.Length && CompareKeys(x, map[i]) > 0)
+ {
+ i++;
+ }
+
+ while (j >= 0 && CompareKeys(x, map[j]) < 0)
+ {
+ j--;
+ }
+
+ if (i > j)
+ {
+ break;
+ }
+
+ if (i < j)
+ {
+ int temp = map[i];
+ map[i] = map[j];
+ map[j] = temp;
+ }
+
i++;
+ j--;
}
+ while (i <= j);
- while (j >= 0 && CompareKeys(x, map[j]) < 0)
+ if (minIdx >= i)
{
- j--;
+ left = i + 1;
}
-
- if (i > j)
+ else if (maxIdx <= j)
{
- break;
+ right = j - 1;
}
- if (i < j)
+ if (j - left <= right - i)
{
- int temp = map[i];
- map[i] = map[j];
- map[j] = temp;
+ if (left < j)
+ {
+ PartialQuickSort(map, left, j, minIdx, maxIdx);
+ }
+
+ left = i;
}
+ else
+ {
+ if (i < right)
+ {
+ PartialQuickSort(map, i, right, minIdx, maxIdx);
+ }
- i++;
- j--;
+ right = j;
+ }
}
- while (i <= j);
+ while (left < right);
+ }
- if (i <= idx)
- {
- left = i + 1;
- }
- else
+ // Finds the element that would be at idx if the collection were sorted.
+ // Time complexity: O(n) best and average case. O(n^2) worst case.
+ protected override int QuickSelect(int[] map, int right, int idx)
+ {
+ int left = 0;
+ do
{
- right = j - 1;
- }
+ int i = left;
+ int j = right;
+ int x = map[i + ((j - i) >> 1)];
+ do
+ {
+ while (i < map.Length && CompareKeys(x, map[i]) > 0)
+ {
+ i++;
+ }
+
+ while (j >= 0 && CompareKeys(x, map[j]) < 0)
+ {
+ j--;
+ }
+
+ if (i > j)
+ {
+ break;
+ }
+
+ if (i < j)
+ {
+ int temp = map[i];
+ map[i] = map[j];
+ map[j] = temp;
+ }
- if (j - left <= right - i)
- {
- if (left < j)
+ i++;
+ j--;
+ }
+ while (i <= j);
+
+ if (i <= idx)
{
- right = j;
+ left = i + 1;
+ }
+ else
+ {
+ right = j - 1;
}
- left = i;
- }
- else
- {
- if (i < right)
+ if (j - left <= right - i)
{
+ if (left < j)
+ {
+ right = j;
+ }
+
left = i;
}
+ else
+ {
+ if (i < right)
+ {
+ left = i;
+ }
- right = j;
+ right = j;
+ }
}
- }
- while (left < right);
+ while (left < right);
- return map[idx];
- }
+ return map[idx];
+ }
- protected override int Min(int[] map, int count)
- {
- int index = 0;
- for (int i = 1; i < count; i++)
+ protected override int Min(int[] map, int count)
{
- if (CompareKeys(map[i], map[index]) < 0)
+ int index = 0;
+ for (int i = 1; i < count; i++)
{
- index = i;
+ if (CompareKeys(map[i], map[index]) < 0)
+ {
+ index = i;
+ }
}
+ return map[index];
}
- return map[index];
}
}
}
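
The abstract QuickSort/PartialQuickSort/QuickSelect/Min hooks above are what let OrderBy avoid a full sort when only part of the ordered sequence is consumed, e.g. First() routing to Min and ElementAt(k) routing to QuickSelect. A quick illustration, not part of the patch; the demo class names are mine, and the exact comparison counts are an implementation detail that varies by runtime version:

    using System;
    using System.Collections.Generic;
    using System.Linq;

    class CountingComparer : IComparer<int>
    {
        public int Count;
        public int Compare(int x, int y) { Count++; return x.CompareTo(y); }
    }

    class PartialSortDemo
    {
        static void Main()
        {
            int[] data = Enumerable.Range(0, 100_000).Reverse().ToArray();

            var full = new CountingComparer();
            _ = data.OrderBy(x => x, full).ToArray();  // full QuickSort over the map

            var min = new CountingComparer();
            _ = data.OrderBy(x => x, min).First();     // Min path: a single O(n) pass

            Console.WriteLine($"full sort: {full.Count}, First(): {min.Count}");
        }
    }
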
diff --git a/src/libraries/System.Linq/src/System/Linq/Range.SpeedOpt.cs b/src/libraries/System.Linq/src/System/Linq/Range.SpeedOpt.cs
index c125673e16d3..bde2cede4b1b 100644
--- a/src/libraries/System.Linq/src/System/Linq/Range.SpeedOpt.cs
+++ b/src/libraries/System.Linq/src/System/Linq/Range.SpeedOpt.cs
@@ -10,14 +10,14 @@ namespace System.Linq
{
public static partial class Enumerable
{
- private sealed partial class RangeIterator : IPartition<int>, IList<int>, IReadOnlyList<int>
+ private sealed partial class RangeIterator : IList<int>, IReadOnlyList<int>
{
public override IEnumerable<TResult> Select<TResult>(Func<int, TResult> selector)
{
- return new SelectRangeIterator<TResult>(_start, _end, selector);
+ return new RangeSelectIterator<TResult>(_start, _end, selector);
}
- public int[] ToArray()
+ public override int[] ToArray()
{
int start = _start;
int[] array = new int[_end - start];
@@ -25,7 +25,7 @@ namespace System.Linq
return array;
}
- public List<int> ToList()
+ public override List<int> ToList()
{
(int start, int end) = (_start, _end);
List<int> list = new List<int>(end - start);
@@ -67,11 +67,11 @@ namespace System.Linq
}
}
- public int GetCount(bool onlyIfCheap) => _end - _start;
+ public override int GetCount(bool onlyIfCheap) => _end - _start;
public int Count => _end - _start;
- public IPartition<int>? Skip(int count)
+ public override Iterator<int>? Skip(int count)
{
if (count >= _end - _start)
{
@@ -81,7 +81,7 @@ namespace System.Linq
return new RangeIterator(_start + count, _end - _start - count);
}
- public IPartition<int> Take(int count)
+ public override Iterator<int> Take(int count)
{
int curCount = _end - _start;
if (count >= curCount)
@@ -92,7 +92,7 @@ namespace System.Linq
return new RangeIterator(_start, count);
}
- public int TryGetElementAt(int index, out bool found)
+ public override int TryGetElementAt(int index, out bool found)
{
if ((uint)index < (uint)(_end - _start))
{
@@ -104,13 +104,13 @@ namespace System.Linq
return 0;
}
- public int TryGetFirst(out bool found)
+ public override int TryGetFirst(out bool found)
{
found = true;
return _start;
}
- public int TryGetLast(out bool found)
+ public override int TryGetLast(out bool found)
{
found = true;
return _end - 1;
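
With the Skip/Take/TryGet* members above now overrides, a Range source composes without enumeration. A small usage sketch, not part of the patch:

    using System;
    using System.Linq;

    class RangeDemo
    {
        static void Main()
        {
            // Skip/Take over Range collapse into a narrower RangeIterator, so
            // ToArray allocates exactly five ints instead of walking a million.
            int[] window = Enumerable.Range(1, 1_000_000).Skip(10).Take(5).ToArray();
            Console.WriteLine(string.Join(", ", window)); // 11, 12, 13, 14, 15
        }
    }
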
diff --git a/src/libraries/System.Linq/src/System/Linq/Repeat.SpeedOpt.cs b/src/libraries/System.Linq/src/System/Linq/Repeat.SpeedOpt.cs
index 3c25ee20ba5f..5ca5b2625b10 100644
--- a/src/libraries/System.Linq/src/System/Linq/Repeat.SpeedOpt.cs
+++ b/src/libraries/System.Linq/src/System/Linq/Repeat.SpeedOpt.cs
@@ -8,12 +8,9 @@ namespace System.Linq
{
public static partial class Enumerable
{
- private sealed partial class RepeatIterator<TResult> : IPartition<TResult>, IList<TResult>, IReadOnlyList<TResult>
+ private sealed partial class RepeatIterator<TResult> : IList<TResult>, IReadOnlyList<TResult>
{
- public override IEnumerable<TResult2> Select<TResult2>(Func<TResult, TResult2> selector) =>
- new SelectIPartitionIterator<TResult, TResult2>(this, selector);
-
- public TResult[] ToArray()
+ public override TResult[] ToArray()
{
TResult[] array = new TResult[_count];
if (_current != null)
@@ -24,7 +21,7 @@ namespace System.Linq
return array;
}
- public List<TResult> ToList()
+ public override List<TResult> ToList()
{
List<TResult> list = new List<TResult>(_count);
SetCountAndGetSpan(list, _count).Fill(_current);
@@ -32,11 +29,11 @@ namespace System.Linq
return list;
}
- public int GetCount(bool onlyIfCheap) => _count;
+ public override int GetCount(bool onlyIfCheap) => _count;
public int Count => _count;
- public IPartition<TResult>? Skip(int count)
+ public override Iterator<TResult>? Skip(int count)
{
Debug.Assert(count > 0);
@@ -48,7 +45,7 @@ namespace System.Linq
return new RepeatIterator<TResult>(_current, _count - count);
}
- public IPartition<TResult> Take(int count)
+ public override Iterator<TResult> Take(int count)
{
Debug.Assert(count > 0);
@@ -60,7 +57,7 @@ namespace System.Linq
return new RepeatIterator<TResult>(_current, count);
}
- public TResult? TryGetElementAt(int index, out bool found)
+ public override TResult? TryGetElementAt(int index, out bool found)
{
if ((uint)index < (uint)_count)
{
@@ -72,13 +69,13 @@ namespace System.Linq
return default;
}
- public TResult TryGetFirst(out bool found)
+ public override TResult TryGetFirst(out bool found)
{
found = true;
return _current;
}
- public TResult TryGetLast(out bool found)
+ public override TResult TryGetLast(out bool found)
{
found = true;
return _current;
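
The same pattern applies to Repeat: Skip and Take only adjust the remaining count, and ToList fills a pre-sized span. A quick check, not part of the patch:

    using System;
    using System.Linq;

    class RepeatDemo
    {
        static void Main()
        {
            // Skip just shrinks the repeat count; the single element is unchanged.
            var tail = Enumerable.Repeat("x", 10).Skip(7).ToList();
            Console.WriteLine($"{tail.Count}: {string.Join(",", tail)}"); // 3: x,x,x
        }
    }
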
diff --git a/src/libraries/System.Linq/src/System/Linq/Reverse.SpeedOpt.cs b/src/libraries/System.Linq/src/System/Linq/Reverse.SpeedOpt.cs
index bb301cc30848..d1ec26de879a 100644
--- a/src/libraries/System.Linq/src/System/Linq/Reverse.SpeedOpt.cs
+++ b/src/libraries/System.Linq/src/System/Linq/Reverse.SpeedOpt.cs
@@ -7,28 +7,28 @@ namespace System.Linq
{
public static partial class Enumerable
{
- private sealed partial class ReverseIterator<TSource> : IPartition<TSource>
+ private sealed partial class ReverseIterator<TSource>
{
- public TSource[] ToArray()
+ public override TSource[] ToArray()
{
TSource[] array = _source.ToArray();
Array.Reverse(array);
return array;
}
- public List<TSource> ToList()
+ public override List<TSource> ToList()
{
List<TSource> list = _source.ToList();
list.Reverse();
return list;
}
- public int GetCount(bool onlyIfCheap) =>
+ public override int GetCount(bool onlyIfCheap) =>
!onlyIfCheap ? _source.Count() :
TryGetNonEnumeratedCount(_source, out int count) ? count :
-1;
- public TSource? TryGetElementAt(int index, out bool found)
+ public override TSource? TryGetElementAt(int index, out bool found)
{
if (_source is IList<TSource> list)
{
@@ -53,11 +53,11 @@ namespace System.Linq
return default;
}
- public TSource? TryGetFirst(out bool found)
+ public override TSource? TryGetFirst(out bool found)
{
- if (_source is IPartition<TSource> partition)
+ if (_source is Iterator<TSource> iterator)
{
- return partition.TryGetLast(out found);
+ return iterator.TryGetLast(out found);
}
else if (_source is IList<TSource> list)
{
@@ -89,11 +89,11 @@ namespace System.Linq
return default;
}
- public TSource? TryGetLast(out bool found)
+ public override TSource? TryGetLast(out bool found)
{
- if (_source is IPartition<TSource> partition)
+ if (_source is Iterator<TSource> iterator)
{
- return partition.TryGetFirst(out found);
+ return iterator.TryGetFirst(out found);
}
else if (_source is IList<TSource> list)
{
@@ -116,10 +116,6 @@ namespace System.Linq
found = false;
return default;
}
-
- public IPartition<TSource>? Skip(int count) => new EnumerablePartition<TSource>(this, count, -1);
-
- public IPartition<TSource>? Take(int count) => new EnumerablePartition<TSource>(this, 0, count - 1);
}
}
}
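
ReverseIterator now answers First/Last by forwarding the mirrored question to its source, so neither query buffers the sequence. An illustration, not part of the patch:

    using System;
    using System.Linq;

    class ReverseDemo
    {
        static void Main()
        {
            int[] source = { 1, 2, 3, 4 };
            // TryGetFirst on the reversed view asks the source for its last
            // element, and vice versa; nothing is copied for these queries.
            Console.WriteLine(source.Reverse().First()); // 4
            Console.WriteLine(source.Reverse().Last());  // 1
        }
    }
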
diff --git a/src/libraries/System.Linq/src/System/Linq/Select.SpeedOpt.cs b/src/libraries/System.Linq/src/System/Linq/Select.SpeedOpt.cs
index 06f87db9c0f7..f55b656033e3 100644
--- a/src/libraries/System.Linq/src/System/Linq/Select.SpeedOpt.cs
+++ b/src/libraries/System.Linq/src/System/Linq/Select.SpeedOpt.cs
@@ -10,15 +10,9 @@ namespace System.Linq
{
public static partial class Enumerable
{
- static partial void CreateSelectIPartitionIterator<TResult, TSource>(
- Func<TSource, TResult> selector, IPartition<TSource> partition, ref IEnumerable<TResult>? result)
+ private sealed partial class IEnumerableSelectIterator<TSource, TResult>
{
- result = new SelectIPartitionIterator<TSource, TResult>(partition, selector);
- }
-
- private sealed partial class SelectEnumerableIterator<TSource, TResult> : IIListProvider<TResult>
- {
- public TResult[] ToArray()
+ public override TResult[] ToArray()
{
SegmentedArrayBuilder<TResult>.ScratchBuffer scratch = default;
SegmentedArrayBuilder<TResult> builder = new(scratch);
@@ -35,7 +29,7 @@ namespace System.Linq
return result;
}
- public List<TResult> ToList()
+ public override List<TResult> ToList()
{
var list = new List<TResult>();
@@ -48,7 +42,7 @@ namespace System.Linq
return list;
}
- public int GetCount(bool onlyIfCheap)
+ public override int GetCount(bool onlyIfCheap)
{
// In case someone uses Count() to force evaluation of
// the selector, run it provided `onlyIfCheap` is false.
@@ -71,11 +65,73 @@ namespace System.Linq
return count;
}
+
+ public override TResult? TryGetElementAt(int index, out bool found)
+ {
+ if (index >= 0)
+ {
+ IEnumerator<TSource> e = _source.GetEnumerator();
+ try
+ {
+ while (e.MoveNext())
+ {
+ if (index == 0)
+ {
+ found = true;
+ return _selector(e.Current);
+ }
+
+ index--;
+ }
+ }
+ finally
+ {
+ (e as IDisposable)?.Dispose();
+ }
+ }
+
+ found = false;
+ return default;
+ }
+
+ public override TResult? TryGetFirst(out bool found)
+ {
+ using IEnumerator<TSource> e = _source.GetEnumerator();
+ if (e.MoveNext())
+ {
+ found = true;
+ return _selector(e.Current);
+ }
+
+ found = false;
+ return default;
+ }
+
+ public override TResult? TryGetLast(out bool found)
+ {
+ using IEnumerator<TSource> e = _source.GetEnumerator();
+
+ if (e.MoveNext())
+ {
+ found = true;
+ TSource last = e.Current;
+
+ while (e.MoveNext())
+ {
+ last = e.Current;
+ }
+
+ return _selector(last);
+ }
+
+ found = false;
+ return default;
+ }
}
- private sealed partial class SelectArrayIterator<TSource, TResult> : IPartition<TResult>
+ private sealed partial class ArraySelectIterator<TSource, TResult>
{
- public TResult[] ToArray()
+ public override TResult[] ToArray()
{
// See assert in constructor.
// Since _source should never be empty, we don't check for 0/return Array.Empty.
@@ -88,7 +144,7 @@ namespace System.Linq
return results;
}
- public List<TResult> ToList()
+ public override List<TResult> ToList()
{
TSource[] source = _source;
Debug.Assert(source.Length > 0);
@@ -107,7 +163,7 @@ namespace System.Linq
}
}
- public int GetCount(bool onlyIfCheap)
+ public override int GetCount(bool onlyIfCheap)
{
// In case someone uses Count() to force evaluation of
// the selector, run it provided `onlyIfCheap` is false.
@@ -123,7 +179,7 @@ namespace System.Linq
return _source.Length;
}
- public IPartition<TResult>? Skip(int count)
+ public override Iterator<TResult>? Skip(int count)
{
Debug.Assert(count > 0);
if (count >= _source.Length)
@@ -131,30 +187,31 @@ namespace System.Linq
return null;
}
- return new SelectListPartitionIterator<TSource, TResult>(_source, _selector, count, int.MaxValue);
+ return new IListSkipTakeSelectIterator<TSource, TResult>(_source, _selector, count, int.MaxValue);
}
- public IPartition<TResult> Take(int count)
+ public override Iterator<TResult> Take(int count)
{
Debug.Assert(count > 0);
return count >= _source.Length ?
this :
- new SelectListPartitionIterator<TSource, TResult>(_source, _selector, 0, count - 1);
+ new IListSkipTakeSelectIterator<TSource, TResult>(_source, _selector, 0, count - 1);
}
- public TResult? TryGetElementAt(int index, out bool found)
+ public override TResult? TryGetElementAt(int index, out bool found)
{
- if ((uint)index < (uint)_source.Length)
+ TSource[] source = _source;
+ if ((uint)index < (uint)source.Length)
{
found = true;
- return _selector(_source[index]);
+ return _selector(source[index]);
}
found = false;
return default;
}
- public TResult TryGetFirst(out bool found)
+ public override TResult TryGetFirst(out bool found)
{
Debug.Assert(_source.Length > 0); // See assert in constructor
@@ -162,22 +219,22 @@ namespace System.Linq
return _selector(_source[0]);
}
- public TResult TryGetLast(out bool found)
+ public override TResult TryGetLast(out bool found)
{
Debug.Assert(_source.Length > 0); // See assert in constructor
found = true;
- return _selector(_source[_source.Length - 1]);
+ return _selector(_source[^1]);
}
}
- private sealed partial class SelectRangeIterator<TResult> : Iterator<TResult>, IPartition<TResult>
+ private sealed partial class RangeSelectIterator<TResult> : Iterator<TResult>
{
private readonly int _start;
private readonly int _end;
private readonly Func<int, TResult> _selector;
- public SelectRangeIterator(int start, int end, Func<int, TResult> selector)
+ public RangeSelectIterator(int start, int end, Func<int, TResult> selector)
{
Debug.Assert(start < end);
Debug.Assert((uint)(end - start) <= (uint)int.MaxValue);
@@ -189,7 +246,7 @@ namespace System.Linq
}
public override Iterator<TResult> Clone() =>
- new SelectRangeIterator<TResult>(_start, _end, _selector);
+ new RangeSelectIterator<TResult>(_start, _end, _selector);
public override bool MoveNext()
{
@@ -206,9 +263,9 @@ namespace System.Linq
}
public override IEnumerable<TResult2> Select<TResult2>(Func<TResult, TResult2> selector) =>
- new SelectRangeIterator<TResult2>(_start, _end, CombineSelectors(_selector, selector));
+ new RangeSelectIterator<TResult2>(_start, _end, CombineSelectors(_selector, selector));
- public TResult[] ToArray()
+ public override TResult[] ToArray()
{
var results = new TResult[_end - _start];
Fill(results, _start, _selector);
@@ -216,7 +273,7 @@ namespace System.Linq
return results;
}
- public List<TResult> ToList()
+ public override List<TResult> ToList()
{
var results = new List<TResult>(_end - _start);
Fill(SetCountAndGetSpan(results, _end - _start), _start, _selector);
@@ -232,7 +289,7 @@ namespace System.Linq
}
}
- public int GetCount(bool onlyIfCheap)
+ public override int GetCount(bool onlyIfCheap)
{
// In case someone uses Count() to force evaluation of the selector,
// run it provided `onlyIfCheap` is false.
@@ -247,7 +304,7 @@ namespace System.Linq
return _end - _start;
}
- public IPartition<TResult>? Skip(int count)
+ public override Iterator<TResult>? Skip(int count)
{
Debug.Assert(count > 0);
@@ -256,10 +313,10 @@ namespace System.Linq
return null;
}
- return new SelectRangeIterator<TResult>(_start + count, _end, _selector);
+ return new RangeSelectIterator<TResult>(_start + count, _end, _selector);
}
- public IPartition<TResult> Take(int count)
+ public override Iterator<TResult> Take(int count)
{
Debug.Assert(count > 0);
@@ -268,10 +325,10 @@ namespace System.Linq
return this;
}
- return new SelectRangeIterator<TResult>(_start, _start + count, _selector);
+ return new RangeSelectIterator<TResult>(_start, _start + count, _selector);
}
- public TResult? TryGetElementAt(int index, out bool found)
+ public override TResult? TryGetElementAt(int index, out bool found)
{
if ((uint)index < (uint)(_end - _start))
{
@@ -283,14 +340,14 @@ namespace System.Linq
return default;
}
- public TResult TryGetFirst(out bool found)
+ public override TResult TryGetFirst(out bool found)
{
Debug.Assert(_end > _start);
found = true;
return _selector(_start);
}
- public TResult TryGetLast(out bool found)
+ public override TResult TryGetLast(out bool found)
{
Debug.Assert(_end > _start);
found = true;
@@ -298,9 +355,9 @@ namespace System.Linq
}
}
- private sealed partial class SelectListIterator<TSource, TResult> : IPartition<TResult>
+ private sealed partial class ListSelectIterator<TSource, TResult>
{
- public TResult[] ToArray()
+ public override TResult[] ToArray()
{
ReadOnlySpan<TSource> source = CollectionsMarshal.AsSpan(_source);
if (source.Length == 0)
@@ -314,7 +371,7 @@ namespace System.Linq
return results;
}
- public List<TResult> ToList()
+ public override List<TResult> ToList()
{
ReadOnlySpan<TSource> source = CollectionsMarshal.AsSpan(_source);
@@ -332,7 +389,7 @@ namespace System.Linq
}
}
- public int GetCount(bool onlyIfCheap)
+ public override int GetCount(bool onlyIfCheap)
{
// In case someone uses Count() to force evaluation of
// the selector, run it provided `onlyIfCheap` is false.
@@ -350,19 +407,19 @@ namespace System.Linq
return count;
}
- public IPartition<TResult> Skip(int count)
+ public override Iterator<TResult> Skip(int count)
{
Debug.Assert(count > 0);
- return new SelectListPartitionIterator<TSource, TResult>(_source, _selector, count, int.MaxValue);
+ return new IListSkipTakeSelectIterator<TSource, TResult>(_source, _selector, count, int.MaxValue);
}
- public IPartition<TResult> Take(int count)
+ public override Iterator<TResult> Take(int count)
{
Debug.Assert(count > 0);
- return new SelectListPartitionIterator<TSource, TResult>(_source, _selector, 0, count - 1);
+ return new IListSkipTakeSelectIterator<TSource, TResult>(_source, _selector, 0, count - 1);
}
- public TResult? TryGetElementAt(int index, out bool found)
+ public override TResult? TryGetElementAt(int index, out bool found)
{
if ((uint)index < (uint)_source.Count)
{
@@ -374,7 +431,7 @@ namespace System.Linq
return default;
}
- public TResult? TryGetFirst(out bool found)
+ public override TResult? TryGetFirst(out bool found)
{
if (_source.Count != 0)
{
@@ -386,7 +443,7 @@ namespace System.Linq
return default;
}
- public TResult? TryGetLast(out bool found)
+ public override TResult? TryGetLast(out bool found)
{
int len = _source.Count;
if (len != 0)
@@ -400,9 +457,9 @@ namespace System.Linq
}
}
- private sealed partial class SelectIListIterator<TSource, TResult> : IPartition<TResult>
+ private sealed partial class IListSelectIterator<TSource, TResult>
{
- public TResult[] ToArray()
+ public override TResult[] ToArray()
{
int count = _source.Count;
if (count == 0)
@@ -416,7 +473,7 @@ namespace System.Linq
return results;
}
- public List<TResult> ToList()
+ public override List<TResult> ToList()
{
IList<TSource> source = _source;
int count = _source.Count;
@@ -435,7 +492,7 @@ namespace System.Linq
}
}
- public int GetCount(bool onlyIfCheap)
+ public override int GetCount(bool onlyIfCheap)
{
// In case someone uses Count() to force evaluation of
// the selector, run it provided `onlyIfCheap` is false.
@@ -453,19 +510,19 @@ namespace System.Linq
return count;
}
- public IPartition<TResult> Skip(int count)
+ public override Iterator<TResult> Skip(int count)
{
Debug.Assert(count > 0);
- return new SelectListPartitionIterator<TSource, TResult>(_source, _selector, count, int.MaxValue);
+ return new IListSkipTakeSelectIterator<TSource, TResult>(_source, _selector, count, int.MaxValue);
}
- public IPartition<TResult> Take(int count)
+ public override Iterator<TResult> Take(int count)
{
Debug.Assert(count > 0);
- return new SelectListPartitionIterator<TSource, TResult>(_source, _selector, 0, count - 1);
+ return new IListSkipTakeSelectIterator<TSource, TResult>(_source, _selector, 0, count - 1);
}
- public TResult? TryGetElementAt(int index, out bool found)
+ public override TResult? TryGetElementAt(int index, out bool found)
{
if ((uint)index < (uint)_source.Count)
{
@@ -477,7 +534,7 @@ namespace System.Linq
return default;
}
- public TResult? TryGetFirst(out bool found)
+ public override TResult? TryGetFirst(out bool found)
{
if (_source.Count != 0)
{
@@ -489,7 +546,7 @@ namespace System.Linq
return default;
}
- public TResult? TryGetLast(out bool found)
+ public override TResult? TryGetLast(out bool found)
{
int len = _source.Count;
if (len != 0)
@@ -504,17 +561,17 @@ namespace System.Linq
}
/// <summary>
- /// An iterator that maps each item of an <see cref="IPartition{TSource}"/>.
+ /// An iterator that maps each item of an <see cref="Iterator{TSource}"/>.
/// </summary>
- /// <typeparam name="TSource">The type of the source partition.</typeparam>
+ /// <typeparam name="TSource">The type of the source elements.</typeparam>
/// <typeparam name="TResult">The type of the mapped items.</typeparam>
- private sealed class SelectIPartitionIterator<TSource, TResult> : Iterator<TResult>, IPartition<TResult>
+ private sealed class IteratorSelectIterator<TSource, TResult> : Iterator<TResult>
{
- private readonly IPartition<TSource> _source;
+ private readonly Iterator<TSource> _source;
private readonly Func<TSource, TResult> _selector;
private IEnumerator<TSource>? _enumerator;
- public SelectIPartitionIterator(IPartition<TSource> source, Func<TSource, TResult> selector)
+ public IteratorSelectIterator(Iterator<TSource> source, Func<TSource, TResult> selector)
{
Debug.Assert(source != null);
Debug.Assert(selector != null);
@@ -523,7 +580,7 @@ namespace System.Linq
}
public override Iterator<TResult> Clone() =>
- new SelectIPartitionIterator<TSource, TResult>(_source, _selector);
+ new IteratorSelectIterator<TSource, TResult>(_source, _selector);
public override bool MoveNext()
{
@@ -560,23 +617,23 @@ namespace System.Linq
}
public override IEnumerable<TResult2> Select<TResult2>(Func<TResult, TResult2> selector) =>
- new SelectIPartitionIterator<TSource, TResult2>(_source, CombineSelectors(_selector, selector));
+ new IteratorSelectIterator<TSource, TResult2>(_source, CombineSelectors(_selector, selector));
- public IPartition<TResult>? Skip(int count)
+ public override Iterator<TResult>? Skip(int count)
{
Debug.Assert(count > 0);
- IPartition<TSource>? source = _source.Skip(count);
- return source is null ? null : new SelectIPartitionIterator<TSource, TResult>(source, _selector);
+ Iterator<TSource>? source = _source.Skip(count);
+ return source is null ? null : new IteratorSelectIterator<TSource, TResult>(source, _selector);
}
- public IPartition<TResult>? Take(int count)
+ public override Iterator<TResult>? Take(int count)
{
Debug.Assert(count > 0);
- IPartition<TSource>? source = _source.Take(count);
- return source is null ? null : new SelectIPartitionIterator<TSource, TResult>(source, _selector);
+ Iterator<TSource>? source = _source.Take(count);
+ return source is null ? null : new IteratorSelectIterator<TSource, TResult>(source, _selector);
}
- public TResult? TryGetElementAt(int index, out bool found)
+ public override TResult? TryGetElementAt(int index, out bool found)
{
bool sourceFound;
TSource? input = _source.TryGetElementAt(index, out sourceFound);
@@ -584,7 +641,7 @@ namespace System.Linq
return sourceFound ? _selector(input!) : default!;
}
- public TResult? TryGetFirst(out bool found)
+ public override TResult? TryGetFirst(out bool found)
{
bool sourceFound;
TSource? input = _source.TryGetFirst(out sourceFound);
@@ -592,7 +649,7 @@ namespace System.Linq
return sourceFound ? _selector(input!) : default!;
}
- public TResult? TryGetLast(out bool found)
+ public override TResult? TryGetLast(out bool found)
{
bool sourceFound;
TSource? input = _source.TryGetLast(out sourceFound);
@@ -629,7 +686,7 @@ namespace System.Linq
return array;
}
- public TResult[] ToArray()
+ public override TResult[] ToArray()
{
int count = _source.GetCount(onlyIfCheap: true);
return count switch
@@ -640,7 +697,7 @@ namespace System.Linq
};
}
- public List<TResult> ToList()
+ public override List<TResult> ToList()
{
int count = _source.GetCount(onlyIfCheap: true);
List<TResult> list;
@@ -665,7 +722,7 @@ namespace System.Linq
return list;
}
- private static void Fill(IPartition<TSource> source, Span<TResult> results, Func<TSource, TResult> func)
+ private static void Fill(Iterator<TSource> source, Span<TResult> results, Func<TSource, TResult> func)
{
int index = 0;
foreach (TSource item in source)
@@ -677,7 +734,7 @@ namespace System.Linq
Debug.Assert(index == results.Length, "Not all list elements were initialized.");
}
- public int GetCount(bool onlyIfCheap)
+ public override int GetCount(bool onlyIfCheap)
{
if (!onlyIfCheap)
{
@@ -705,14 +762,14 @@ namespace System.Linq
/// <typeparam name="TSource">The type of the source list.</typeparam>
/// <typeparam name="TResult">The type of the mapped items.</typeparam>
[DebuggerDisplay("Count = {Count}")]
- private sealed class SelectListPartitionIterator<TSource, TResult> : Iterator<TResult>, IPartition<TResult>
+ private sealed class IListSkipTakeSelectIterator<TSource, TResult> : Iterator<TResult>
{
private readonly IList<TSource> _source;
private readonly Func<TSource, TResult> _selector;
private readonly int _minIndexInclusive;
private readonly int _maxIndexInclusive;
- public SelectListPartitionIterator(IList<TSource> source, Func<TSource, TResult> selector, int minIndexInclusive, int maxIndexInclusive)
+ public IListSkipTakeSelectIterator(IList<TSource> source, Func<TSource, TResult> selector, int minIndexInclusive, int maxIndexInclusive)
{
Debug.Assert(source != null);
Debug.Assert(selector != null);
@@ -725,7 +782,7 @@ namespace System.Linq
}
public override Iterator<TResult> Clone() =>
- new SelectListPartitionIterator<TSource, TResult>(_source, _selector, _minIndexInclusive, _maxIndexInclusive);
+ new IListSkipTakeSelectIterator<TSource, TResult>(_source, _selector, _minIndexInclusive, _maxIndexInclusive);
public override bool MoveNext()
{
@@ -745,23 +802,23 @@ namespace System.Linq
}
public override IEnumerable<TResult2> Select<TResult2>(Func<TResult, TResult2> selector) =>
- new SelectListPartitionIterator<TSource, TResult2>(_source, CombineSelectors(_selector, selector), _minIndexInclusive, _maxIndexInclusive);
+ new IListSkipTakeSelectIterator<TSource, TResult2>(_source, CombineSelectors(_selector, selector), _minIndexInclusive, _maxIndexInclusive);
- public IPartition<TResult>? Skip(int count)
+ public override Iterator<TResult>? Skip(int count)
{
Debug.Assert(count > 0);
int minIndex = _minIndexInclusive + count;
- return (uint)minIndex > (uint)_maxIndexInclusive ? null : new SelectListPartitionIterator<TSource, TResult>(_source, _selector, minIndex, _maxIndexInclusive);
+ return (uint)minIndex > (uint)_maxIndexInclusive ? null : new IListSkipTakeSelectIterator<TSource, TResult>(_source, _selector, minIndex, _maxIndexInclusive);
}
- public IPartition<TResult> Take(int count)
+ public override Iterator<TResult> Take(int count)
{
Debug.Assert(count > 0);
int maxIndex = _minIndexInclusive + count - 1;
- return (uint)maxIndex >= (uint)_maxIndexInclusive ? this : new SelectListPartitionIterator<TSource, TResult>(_source, _selector, _minIndexInclusive, maxIndex);
+ return (uint)maxIndex >= (uint)_maxIndexInclusive ? this : new IListSkipTakeSelectIterator<TSource, TResult>(_source, _selector, _minIndexInclusive, maxIndex);
}
- public TResult? TryGetElementAt(int index, out bool found)
+ public override TResult? TryGetElementAt(int index, out bool found)
{
if ((uint)index <= (uint)(_maxIndexInclusive - _minIndexInclusive) && index < _source.Count - _minIndexInclusive)
{
@@ -773,7 +830,7 @@ namespace System.Linq
return default;
}
- public TResult? TryGetFirst(out bool found)
+ public override TResult? TryGetFirst(out bool found)
{
if (_source.Count > _minIndexInclusive)
{
@@ -785,7 +842,7 @@ namespace System.Linq
return default;
}
- public TResult? TryGetLast(out bool found)
+ public override TResult? TryGetLast(out bool found)
{
int lastIndex = _source.Count - 1;
if (lastIndex >= _minIndexInclusive)
@@ -812,7 +869,7 @@ namespace System.Linq
}
}
- public TResult[] ToArray()
+ public override TResult[] ToArray()
{
int count = Count;
if (count == 0)
@@ -826,7 +883,7 @@ namespace System.Linq
return array;
}
- public List<TResult> ToList()
+ public override List<TResult> ToList()
{
int count = Count;
if (count == 0)
@@ -848,7 +905,7 @@ namespace System.Linq
}
}
- public int GetCount(bool onlyIfCheap)
+ public override int GetCount(bool onlyIfCheap)
{
// In case someone uses Count() to force evaluation of
// the selector, run it provided `onlyIfCheap` is false.
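
The TryGetElementAt/TryGetFirst/TryGetLast overrides added to IEnumerableSelectIterator keep the selector lazy even on plain enumerable sources. A sketch of the observable effect, not part of the patch:

    using System;
    using System.Collections.Generic;
    using System.Linq;

    class SelectFirstDemo
    {
        static IEnumerable<int> Numbers()
        {
            for (int i = 0; i < 1_000; i++) yield return i;
        }

        static void Main()
        {
            int calls = 0;
            // TryGetFirst projects only the element it returns, so the
            // selector runs once rather than a thousand times.
            int first = Numbers().Select(x => { calls++; return x * 2; }).First();
            Console.WriteLine($"{first} after {calls} call(s)"); // 0 after 1 call(s)
        }
    }
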
diff --git a/src/libraries/System.Linq/src/System/Linq/Select.cs b/src/libraries/System.Linq/src/System/Linq/Select.cs
index 3059fa7f0a8e..916267c1ecaf 100644
--- a/src/libraries/System.Linq/src/System/Linq/Select.cs
+++ b/src/libraries/System.Linq/src/System/Linq/Select.cs
@@ -37,35 +37,20 @@ namespace System.Linq
return [];
}
- return new SelectArrayIterator<TSource, TResult>(array, selector);
+ return new ArraySelectIterator<TSource, TResult>(array, selector);
}
if (source is List<TSource> list)
{
- return new SelectListIterator<TSource, TResult>(list, selector);
+ return new ListSelectIterator<TSource, TResult>(list, selector);
}
- return new SelectIListIterator<TSource, TResult>(ilist, selector);
+ return new IListSelectIterator<TSource, TResult>(ilist, selector);
}
- if (source is IPartition<TSource> partition)
- {
- IEnumerable<TResult>? result = null;
- CreateSelectIPartitionIterator(selector, partition, ref result);
- if (result != null)
- {
- return result;
- }
- }
-
- return new SelectEnumerableIterator<TSource, TResult>(source, selector);
+ return new IEnumerableSelectIterator<TSource, TResult>(source, selector);
}
-#pragma warning disable IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6177
- static partial void CreateSelectIPartitionIterator<TResult, TSource>(
- Func<TSource, TResult> selector, IPartition<TSource> partition, [NotNull] ref IEnumerable<TResult>? result);
-#pragma warning restore IDE0060
-
public static IEnumerable<TResult> Select<TSource, TResult>(this IEnumerable<TSource> source, Func<TSource, int, TResult> selector)
{
if (source == null)
@@ -105,13 +90,13 @@ namespace System.Linq
/// </summary>
/// <typeparam name="TSource">The type of the source enumerable.</typeparam>
/// <typeparam name="TResult">The type of the mapped items.</typeparam>
- private sealed partial class SelectEnumerableIterator<TSource, TResult> : Iterator<TResult>
+ private sealed partial class IEnumerableSelectIterator<TSource, TResult> : Iterator<TResult>
{
private readonly IEnumerable<TSource> _source;
private readonly Func<TSource, TResult> _selector;
private IEnumerator<TSource>? _enumerator;
- public SelectEnumerableIterator(IEnumerable<TSource> source, Func<TSource, TResult> selector)
+ public IEnumerableSelectIterator(IEnumerable<TSource> source, Func<TSource, TResult> selector)
{
Debug.Assert(source != null);
Debug.Assert(selector != null);
@@ -120,7 +105,7 @@ namespace System.Linq
}
public override Iterator<TResult> Clone() =>
- new SelectEnumerableIterator<TSource, TResult>(_source, _selector);
+ new IEnumerableSelectIterator<TSource, TResult>(_source, _selector);
public override void Dispose()
{
@@ -157,7 +142,7 @@ namespace System.Linq
}
public override IEnumerable<TResult2> Select<TResult2>(Func<TResult, TResult2> selector) =>
- new SelectEnumerableIterator<TSource, TResult2>(_source, CombineSelectors(_selector, selector));
+ new IEnumerableSelectIterator<TSource, TResult2>(_source, CombineSelectors(_selector, selector));
}
/// <summary>
@@ -166,12 +151,12 @@ namespace System.Linq
/// <typeparam name="TSource">The type of the source array.</typeparam>
/// <typeparam name="TResult">The type of the mapped items.</typeparam>
[DebuggerDisplay("Count = {CountForDebugger}")]
- private sealed partial class SelectArrayIterator<TSource, TResult> : Iterator<TResult>
+ private sealed partial class ArraySelectIterator<TSource, TResult> : Iterator<TResult>
{
private readonly TSource[] _source;
private readonly Func<TSource, TResult> _selector;
- public SelectArrayIterator(TSource[] source, Func<TSource, TResult> selector)
+ public ArraySelectIterator(TSource[] source, Func<TSource, TResult> selector)
{
Debug.Assert(source != null);
Debug.Assert(selector != null);
@@ -182,7 +167,7 @@ namespace System.Linq
private int CountForDebugger => _source.Length;
- public override Iterator<TResult> Clone() => new SelectArrayIterator<TSource, TResult>(_source, _selector);
+ public override Iterator<TResult> Clone() => new ArraySelectIterator<TSource, TResult>(_source, _selector);
public override bool MoveNext()
{
@@ -200,7 +185,7 @@ namespace System.Linq
}
public override IEnumerable<TResult2> Select<TResult2>(Func<TResult, TResult2> selector) =>
- new SelectArrayIterator<TSource, TResult2>(_source, CombineSelectors(_selector, selector));
+ new ArraySelectIterator<TSource, TResult2>(_source, CombineSelectors(_selector, selector));
}
/// <summary>
@@ -209,13 +194,13 @@ namespace System.Linq
/// <typeparam name="TSource">The type of the source list.</typeparam>
/// <typeparam name="TResult">The type of the mapped items.</typeparam>
[DebuggerDisplay("Count = {CountForDebugger}")]
- private sealed partial class SelectListIterator<TSource, TResult> : Iterator<TResult>
+ private sealed partial class ListSelectIterator<TSource, TResult> : Iterator<TResult>
{
private readonly List<TSource> _source;
private readonly Func<TSource, TResult> _selector;
private List<TSource>.Enumerator _enumerator;
- public SelectListIterator(List<TSource> source, Func<TSource, TResult> selector)
+ public ListSelectIterator(List<TSource> source, Func<TSource, TResult> selector)
{
Debug.Assert(source != null);
Debug.Assert(selector != null);
@@ -225,7 +210,7 @@ namespace System.Linq
private int CountForDebugger => _source.Count;
- public override Iterator<TResult> Clone() => new SelectListIterator<TSource, TResult>(_source, _selector);
+ public override Iterator<TResult> Clone() => new ListSelectIterator<TSource, TResult>(_source, _selector);
public override bool MoveNext()
{
@@ -250,7 +235,7 @@ namespace System.Linq
}
public override IEnumerable<TResult2> Select<TResult2>(Func<TResult, TResult2> selector) =>
- new SelectListIterator<TSource, TResult2>(_source, CombineSelectors(_selector, selector));
+ new ListSelectIterator<TSource, TResult2>(_source, CombineSelectors(_selector, selector));
}
/// <summary>
@@ -259,13 +244,13 @@ namespace System.Linq
/// <typeparam name="TSource">The type of the source list.</typeparam>
/// <typeparam name="TResult">The type of the mapped items.</typeparam>
[DebuggerDisplay("Count = {CountForDebugger}")]
- private sealed partial class SelectIListIterator<TSource, TResult> : Iterator<TResult>
+ private sealed partial class IListSelectIterator<TSource, TResult> : Iterator<TResult>
{
private readonly IList<TSource> _source;
private readonly Func<TSource, TResult> _selector;
private IEnumerator<TSource>? _enumerator;
- public SelectIListIterator(IList<TSource> source, Func<TSource, TResult> selector)
+ public IListSelectIterator(IList<TSource> source, Func<TSource, TResult> selector)
{
Debug.Assert(source != null);
Debug.Assert(selector != null);
@@ -275,7 +260,7 @@ namespace System.Linq
private int CountForDebugger => _source.Count;
- public override Iterator<TResult> Clone() => new SelectIListIterator<TSource, TResult>(_source, _selector);
+ public override Iterator<TResult> Clone() => new IListSelectIterator<TSource, TResult>(_source, _selector);
public override bool MoveNext()
{
@@ -312,7 +297,7 @@ namespace System.Linq
}
public override IEnumerable<TResult2> Select<TResult2>(Func<TResult, TResult2> selector) =>
- new SelectIListIterator<TSource, TResult2>(_source, CombineSelectors(_selector, selector));
+ new IListSelectIterator<TSource, TResult2>(_source, CombineSelectors(_selector, selector));
}
}
}
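
All four renamed iterator classes share deferred semantics: constructing them runs nothing, and chained Selects fuse via CombineSelectors into a single projection pass. For example (not part of the patch):

    using System;
    using System.Linq;

    class DeferredSelectDemo
    {
        static void Main()
        {
            int[] data = { 1, 2, 3 };
            int calls = 0;
            var projected = data.Select(x => { calls++; return x * 10; })
                                .Select(x => x + 1); // fused into one iterator

            Console.WriteLine(calls);           // 0 - nothing has run yet
            Console.WriteLine(projected.Sum()); // 63 (11 + 21 + 31)
            Console.WriteLine(calls);           // 3 - first selector ran once per element
        }
    }
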
diff --git a/src/libraries/System.Linq/src/System/Linq/SelectMany.SpeedOpt.cs b/src/libraries/System.Linq/src/System/Linq/SelectMany.SpeedOpt.cs
index 050ae6a4e06b..ae0bf35ef8f1 100644
--- a/src/libraries/System.Linq/src/System/Linq/SelectMany.SpeedOpt.cs
+++ b/src/libraries/System.Linq/src/System/Linq/SelectMany.SpeedOpt.cs
@@ -7,9 +7,9 @@ namespace System.Linq
{
public static partial class Enumerable
{
- private sealed partial class SelectManySingleSelectorIterator<TSource, TResult> : IIListProvider<TResult>
+ private sealed partial class SelectManySingleSelectorIterator<TSource, TResult>
{
- public int GetCount(bool onlyIfCheap)
+ public override int GetCount(bool onlyIfCheap)
{
if (onlyIfCheap)
{
@@ -29,7 +29,7 @@ namespace System.Linq
return count;
}
- public TResult[] ToArray()
+ public override TResult[] ToArray()
{
SegmentedArrayBuilder<TResult>.ScratchBuffer scratch = default;
SegmentedArrayBuilder<TResult> builder = new(scratch);
@@ -46,7 +46,7 @@ namespace System.Linq
return result;
}
- public List<TResult> ToList()
+ public override List<TResult> ToList()
{
var list = new List<TResult>();
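
SelectManySingleSelectorIterator reports its count as not-cheap (GetCount(onlyIfCheap: true) returns -1), which surfaces through the public TryGetNonEnumeratedCount. For instance (not part of the patch):

    using System;
    using System.Linq;

    class SelectManyCountDemo
    {
        static void Main()
        {
            var flat = new[] { new[] { 1, 2 }, new[] { 3 } }.SelectMany(x => x);

            // Counting would have to run the selector, so the cheap count is refused...
            Console.WriteLine(flat.TryGetNonEnumeratedCount(out int n)); // False
            // ...and Count() enumerates instead.
            Console.WriteLine(flat.Count()); // 3
        }
    }
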
diff --git a/src/libraries/System.Linq/src/System/Linq/Skip.SpeedOpt.cs b/src/libraries/System.Linq/src/System/Linq/Skip.SpeedOpt.cs
index 1596dc0cc7cf..74ff73a06824 100644
--- a/src/libraries/System.Linq/src/System/Linq/Skip.SpeedOpt.cs
+++ b/src/libraries/System.Linq/src/System/Linq/Skip.SpeedOpt.cs
@@ -9,7 +9,7 @@ namespace System.Linq
{
private static IEnumerable<TSource> SkipIterator<TSource>(IEnumerable<TSource> source, int count) =>
source is IList<TSource> sourceList ?
- (IEnumerable<TSource>)new ListPartition<TSource>(sourceList, count, int.MaxValue) :
- new EnumerablePartition<TSource>(source, count, -1);
+ (IEnumerable<TSource>)new IListSkipTakeIterator<TSource>(sourceList, count, int.MaxValue) :
+ new IEnumerableSkipTakeIterator<TSource>(source, count, -1);
}
}
diff --git a/src/libraries/System.Linq/src/System/Linq/Skip.cs b/src/libraries/System.Linq/src/System/Linq/Skip.cs
index 3652d1da3e79..582fb14f12a4 100644
--- a/src/libraries/System.Linq/src/System/Linq/Skip.cs
+++ b/src/libraries/System.Linq/src/System/Linq/Skip.cs
@@ -23,17 +23,19 @@ namespace System.Linq
{
// Return source if not actually skipping, but only if it's a type from here, to avoid
// issues if collections are used as keys or otherwise must not be aliased.
- if (source is Iterator<TSource> || source is IPartition<TSource>)
+ if (source is Iterator<TSource>)
{
return source;
}
count = 0;
}
- else if (source is IPartition<TSource> partition)
+#if !OPTIMIZE_FOR_SIZE
+ else if (source is Iterator<TSource> iterator)
{
- return partition.Skip(count) ?? Empty<TSource>();
+ return iterator.Skip(count) ?? Empty<TSource>();
}
+#endif
return SkipIterator(source, count);
}
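
The tightened check above (Iterator<TSource> now subsumes the old IPartition test) preserves the aliasing rule the comment describes: Skip(0) may hand back one of LINQ's own iterators unchanged, but never the caller's collection. This identity is an implementation detail rather than a contract, but it is observable today:

    using System;
    using System.Linq;

    class SkipZeroDemo
    {
        static void Main()
        {
            int[] array = { 1, 2, 3 };
            var iterator = array.Select(x => x);

            Console.WriteLine(ReferenceEquals(iterator.Skip(0), iterator)); // True
            Console.WriteLine(ReferenceEquals(array.Skip(0), array));       // False
        }
    }
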
diff --git a/src/libraries/System.Linq/src/System/Linq/Partition.SpeedOpt.cs b/src/libraries/System.Linq/src/System/Linq/SkipTake.SpeedOpt.cs
index 202fb803881e..021097de87b4 100644
--- a/src/libraries/System.Linq/src/System/Linq/Partition.SpeedOpt.cs
+++ b/src/libraries/System.Linq/src/System/Linq/SkipTake.SpeedOpt.cs
@@ -1,69 +1,11 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
-using System.Collections;
using System.Collections.Generic;
using System.Diagnostics;
namespace System.Linq
{
- internal sealed class OrderedPartition<TElement> : IPartition<TElement>
- {
- private readonly OrderedEnumerable<TElement> _source;
- private readonly int _minIndexInclusive;
- private readonly int _maxIndexInclusive;
-
- public OrderedPartition(OrderedEnumerable<TElement> source, int minIdxInclusive, int maxIdxInclusive)
- {
- _source = source;
- _minIndexInclusive = minIdxInclusive;
- _maxIndexInclusive = maxIdxInclusive;
- }
-
- public IEnumerator<TElement> GetEnumerator() => _source.GetEnumerator(_minIndexInclusive, _maxIndexInclusive);
-
- IEnumerator IEnumerable.GetEnumerator() => GetEnumerator();
-
- public IPartition<TElement>? Skip(int count)
- {
- int minIndex = _minIndexInclusive + count;
- return (uint)minIndex > (uint)_maxIndexInclusive ? null : new OrderedPartition<TElement>(_source, minIndex, _maxIndexInclusive);
- }
-
- public IPartition<TElement> Take(int count)
- {
- int maxIndex = _minIndexInclusive + count - 1;
- if ((uint)maxIndex >= (uint)_maxIndexInclusive)
- {
- return this;
- }
-
- return new OrderedPartition<TElement>(_source, _minIndexInclusive, maxIndex);
- }
-
- public TElement? TryGetElementAt(int index, out bool found)
- {
- if ((uint)index <= (uint)(_maxIndexInclusive - _minIndexInclusive))
- {
- return _source.TryGetElementAt(index + _minIndexInclusive, out found);
- }
-
- found = false;
- return default;
- }
-
- public TElement? TryGetFirst(out bool found) => _source.TryGetElementAt(_minIndexInclusive, out found);
-
- public TElement? TryGetLast(out bool found) =>
- _source.TryGetLast(_minIndexInclusive, _maxIndexInclusive, out found);
-
- public TElement[] ToArray() => _source.ToArray(_minIndexInclusive, _maxIndexInclusive);
-
- public List<TElement> ToList() => _source.ToList(_minIndexInclusive, _maxIndexInclusive);
-
- public int GetCount(bool onlyIfCheap) => _source.GetCount(_minIndexInclusive, _maxIndexInclusive, onlyIfCheap);
- }
-
public static partial class Enumerable
{
/// <summary>
@@ -71,13 +13,13 @@ namespace System.Linq
/// </summary>
/// <typeparam name="TSource">The type of the source list.</typeparam>
[DebuggerDisplay("Count = {Count}")]
- private sealed class ListPartition<TSource> : Iterator<TSource>, IPartition<TSource>, IList<TSource>, IReadOnlyList<TSource>
+ private sealed class IListSkipTakeIterator<TSource> : Iterator<TSource>, IList<TSource>, IReadOnlyList<TSource>
{
private readonly IList<TSource> _source;
private readonly int _minIndexInclusive;
private readonly int _maxIndexInclusive;
- public ListPartition(IList<TSource> source, int minIndexInclusive, int maxIndexInclusive)
+ public IListSkipTakeIterator(IList<TSource> source, int minIndexInclusive, int maxIndexInclusive)
{
Debug.Assert(source != null);
Debug.Assert(minIndexInclusive >= 0);
@@ -88,7 +30,7 @@ namespace System.Linq
}
public override Iterator<TSource> Clone() =>
- new ListPartition<TSource>(_source, _minIndexInclusive, _maxIndexInclusive);
+ new IListSkipTakeIterator<TSource>(_source, _minIndexInclusive, _maxIndexInclusive);
public override bool MoveNext()
{
@@ -108,21 +50,21 @@ namespace System.Linq
}
public override IEnumerable<TResult> Select<TResult>(Func<TSource, TResult> selector) =>
- new SelectListPartitionIterator<TSource, TResult>(_source, selector, _minIndexInclusive, _maxIndexInclusive);
+ new IListSkipTakeSelectIterator<TSource, TResult>(_source, selector, _minIndexInclusive, _maxIndexInclusive);
- public IPartition<TSource>? Skip(int count)
+ public override Iterator<TSource>? Skip(int count)
{
int minIndex = _minIndexInclusive + count;
- return (uint)minIndex > (uint)_maxIndexInclusive ? null : new ListPartition<TSource>(_source, minIndex, _maxIndexInclusive);
+ return (uint)minIndex > (uint)_maxIndexInclusive ? null : new IListSkipTakeIterator<TSource>(_source, minIndex, _maxIndexInclusive);
}
- public IPartition<TSource> Take(int count)
+ public override Iterator<TSource> Take(int count)
{
int maxIndex = _minIndexInclusive + count - 1;
- return (uint)maxIndex >= (uint)_maxIndexInclusive ? this : new ListPartition<TSource>(_source, _minIndexInclusive, maxIndex);
+ return (uint)maxIndex >= (uint)_maxIndexInclusive ? this : new IListSkipTakeIterator<TSource>(_source, _minIndexInclusive, maxIndex);
}
- public TSource? TryGetElementAt(int index, out bool found)
+ public override TSource? TryGetElementAt(int index, out bool found)
{
if ((uint)index <= (uint)(_maxIndexInclusive - _minIndexInclusive) && index < _source.Count - _minIndexInclusive)
{
@@ -134,7 +76,7 @@ namespace System.Linq
return default;
}
- public TSource? TryGetFirst(out bool found)
+ public override TSource? TryGetFirst(out bool found)
{
if (_source.Count > _minIndexInclusive)
{
@@ -146,7 +88,7 @@ namespace System.Linq
return default;
}
- public TSource? TryGetLast(out bool found)
+ public override TSource? TryGetLast(out bool found)
{
int lastIndex = _source.Count - 1;
if (lastIndex >= _minIndexInclusive)
@@ -173,9 +115,9 @@ namespace System.Linq
}
}
- public int GetCount(bool onlyIfCheap) => Count;
+ public override int GetCount(bool onlyIfCheap) => Count;
- public TSource[] ToArray()
+ public override TSource[] ToArray()
{
int count = Count;
if (count == 0)
@@ -188,16 +130,16 @@ namespace System.Linq
return array;
}
- public List<TSource> ToList()
+ public override List<TSource> ToList()
{
int count = Count;
- if (count == 0)
+
+ List<TSource> list = [];
+ if (count != 0)
{
- return new List<TSource>();
+ Fill(_source, SetCountAndGetSpan(list, count), _minIndexInclusive);
}
- List<TSource> list = new List<TSource>(count);
- Fill(_source, SetCountAndGetSpan(list, count), _minIndexInclusive);
return list;
}
@@ -257,7 +199,7 @@ namespace System.Linq
/// An iterator that yields the items of part of an <see cref="IEnumerable{TSource}"/>.
/// </summary>
/// <typeparam name="TSource">The type of the source enumerable.</typeparam>
- private sealed class EnumerablePartition<TSource> : Iterator<TSource>, IPartition<TSource>
+ private sealed class IEnumerableSkipTakeIterator<TSource> : Iterator<TSource>
{
private readonly IEnumerable<TSource> _source;
private readonly int _minIndexInclusive;
@@ -265,7 +207,7 @@ namespace System.Linq
// If this is -1, it's impossible to set a limit on the count.
private IEnumerator<TSource>? _enumerator;
- internal EnumerablePartition(IEnumerable<TSource> source, int minIndexInclusive, int maxIndexInclusive)
+ internal IEnumerableSkipTakeIterator(IEnumerable<TSource> source, int minIndexInclusive, int maxIndexInclusive)
{
Debug.Assert(source != null);
Debug.Assert(!(source is IList<TSource>), $"The caller needs to check for {nameof(IList<TSource>)}.");
@@ -289,7 +231,7 @@ namespace System.Linq
private int Limit => _maxIndexInclusive + 1 - _minIndexInclusive; // This is that upper bound.
public override Iterator<TSource> Clone() =>
- new EnumerablePartition<TSource>(_source, _minIndexInclusive, _maxIndexInclusive);
+ new IEnumerableSkipTakeIterator<TSource>(_source, _minIndexInclusive, _maxIndexInclusive);
public override void Dispose()
{
@@ -302,7 +244,7 @@ namespace System.Linq
base.Dispose();
}
- public int GetCount(bool onlyIfCheap)
+ public override int GetCount(bool onlyIfCheap)
{
if (onlyIfCheap)
{
@@ -319,7 +261,7 @@ namespace System.Linq
using (IEnumerator<TSource> en = _source.GetEnumerator())
{
// We only want to iterate up to _maxIndexInclusive + 1.
- // Past that, we know the enumerable will be able to fit this partition,
+ // Past that, we know the enumerable will be able to fit this subset,
// so the count will just be _maxIndexInclusive + 1 - _minIndexInclusive.
// Note that it is possible for _maxIndexInclusive to be int.MaxValue here,
@@ -331,7 +273,6 @@ namespace System.Linq
Debug.Assert(count != (uint)int.MaxValue + 1 || _minIndexInclusive > 0, "Our return value will be incorrect.");
return Math.Max((int)count - _minIndexInclusive, 0);
}
-
}
public override bool MoveNext()
@@ -383,10 +324,7 @@ namespace System.Linq
return false;
}
- public override IEnumerable<TResult> Select<TResult>(Func<TSource, TResult> selector) =>
- new SelectIPartitionIterator<TSource, TResult>(this, selector);
-
- public IPartition<TSource>? Skip(int count)
+ public override Iterator<TSource>? Skip(int count)
{
int minIndex = _minIndexInclusive + count;
@@ -397,7 +335,7 @@ namespace System.Linq
// If we don't know our max count and minIndex can no longer fit in a positive int,
// then we will need to wrap ourselves in another iterator.
// This can happen, for example, during e.Skip(int.MaxValue).Skip(int.MaxValue).
- return new EnumerablePartition<TSource>(this, count, -1);
+ return new IEnumerableSkipTakeIterator<TSource>(this, count, -1);
}
}
else if ((uint)minIndex > (uint)_maxIndexInclusive)
@@ -409,10 +347,10 @@ namespace System.Linq
}
Debug.Assert(minIndex >= 0, $"We should have taken care of all cases when {nameof(minIndex)} overflows.");
- return new EnumerablePartition<TSource>(_source, minIndex, _maxIndexInclusive);
+ return new IEnumerableSkipTakeIterator<TSource>(_source, minIndex, _maxIndexInclusive);
}
- public IPartition<TSource> Take(int count)
+ public override Iterator<TSource> Take(int count)
{
int maxIndex = _minIndexInclusive + count - 1;
if (!HasLimit)
@@ -425,7 +363,7 @@ namespace System.Linq
// _minIndexInclusive (which is count - 1) must fit in an int.
// Example: e.Skip(50).Take(int.MaxValue).
- return new EnumerablePartition<TSource>(this, 0, count - 1);
+ return new IEnumerableSkipTakeIterator<TSource>(this, 0, count - 1);
}
}
else if ((uint)maxIndex >= (uint)_maxIndexInclusive)
@@ -437,18 +375,23 @@ namespace System.Linq
}
Debug.Assert(maxIndex >= 0, $"We should have taken care of all cases when {nameof(maxIndex)} overflows.");
- return new EnumerablePartition<TSource>(_source, _minIndexInclusive, maxIndex);
+ return new IEnumerableSkipTakeIterator<TSource>(_source, _minIndexInclusive, maxIndex);
}
- public TSource? TryGetElementAt(int index, out bool found)
+ public override TSource? TryGetElementAt(int index, out bool found)
{
// If the index is negative or >= our max count, return early.
if (index >= 0 && (!HasLimit || index < Limit))
{
- using (IEnumerator<TSource> en = _source.GetEnumerator())
+ Debug.Assert(_minIndexInclusive + index >= 0, $"Adding {nameof(index)} caused {nameof(_minIndexInclusive)} to overflow.");
+
+ if (_source is Iterator<TSource> iterator)
{
- Debug.Assert(_minIndexInclusive + index >= 0, $"Adding {nameof(index)} caused {nameof(_minIndexInclusive)} to overflow.");
+ return iterator.TryGetElementAt(_minIndexInclusive + index, out found);
+ }
+ using (IEnumerator<TSource> en = _source.GetEnumerator())
+ {
if (SkipBefore(_minIndexInclusive + index, en) && en.MoveNext())
{
found = true;
@@ -461,8 +404,15 @@ namespace System.Linq
return default;
}
- public TSource? TryGetFirst(out bool found)
+ public override TSource? TryGetFirst(out bool found)
{
+ Debug.Assert(!HasLimit || Limit > 0);
+
+ if (_source is Iterator<TSource> iterator)
+ {
+ return iterator.TryGetElementAt(_minIndexInclusive, out found);
+ }
+
using (IEnumerator<TSource> en = _source.GetEnumerator())
{
if (SkipBeforeFirst(en) && en.MoveNext())
@@ -476,8 +426,17 @@ namespace System.Linq
return default;
}
- public TSource? TryGetLast(out bool found)
+ public override TSource? TryGetLast(out bool found)
{
+ if (_source is Iterator<TSource> iterator &&
+ iterator.GetCount(onlyIfCheap: true) is int count &&
+ count >= _minIndexInclusive)
+ {
+ return !HasLimit ?
+ iterator.TryGetLast(out found) :
+ iterator.TryGetElementAt(_maxIndexInclusive, out found);
+ }
+
using (IEnumerator<TSource> en = _source.GetEnumerator())
{
if (SkipBeforeFirst(en) && en.MoveNext())
@@ -502,7 +461,7 @@ namespace System.Linq
return default;
}
- public TSource[] ToArray()
+ public override TSource[] ToArray()
{
using (IEnumerator<TSource> en = _source.GetEnumerator())
{
@@ -530,7 +489,7 @@ namespace System.Linq
return [];
}
- public List<TSource> ToList()
+ public override List<TSource> ToList()
{
var list = new List<TSource>();
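The index arithmetic above collapses chained Skip/Take calls into a single [_minIndexInclusive, _maxIndexInclusive] window, re-wrapping the iterator only when the combined index would overflow. A stand-alone illustration of the semantics being preserved, using only the public operators (the class name is illustrative):

using System;
using System.Linq;

class SkipTakeCompositionDemo
{
    static void Main()
    {
        var source = Enumerable.Range(0, 10);

        // Composed skips/takes collapse into one index window:
        // Skip(3).Take(4) selects indexes 3..6.
        Console.WriteLine(string.Join(", ", source.Skip(3).Take(4))); // 3, 4, 5, 6

        // Oversized skips simply yield nothing; Skip(int.MaxValue).Skip(int.MaxValue)
        // is the overflow case handled above by wrapping in a fresh iterator.
        Console.WriteLine(source.Skip(int.MaxValue).Skip(int.MaxValue).Any()); // False
    }
}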
diff --git a/src/libraries/System.Linq/src/System/Linq/Take.SpeedOpt.cs b/src/libraries/System.Linq/src/System/Linq/Take.SpeedOpt.cs
index f97c5295f75a..f761d4a8ab07 100644
--- a/src/libraries/System.Linq/src/System/Linq/Take.SpeedOpt.cs
+++ b/src/libraries/System.Linq/src/System/Linq/Take.SpeedOpt.cs
@@ -14,9 +14,9 @@ namespace System.Linq
Debug.Assert(count > 0);
return
- source is IPartition<TSource> partition ? (partition.Take(count) ?? Empty<TSource>()) :
- source is IList<TSource> sourceList ? new ListPartition<TSource>(sourceList, 0, count - 1) :
- new EnumerablePartition<TSource>(source, 0, count - 1);
+ source is Iterator<TSource> iterator ? (iterator.Take(count) ?? Empty<TSource>()) :
+ source is IList<TSource> sourceList ? new IListSkipTakeIterator<TSource>(sourceList, 0, count - 1) :
+ new IEnumerableSkipTakeIterator<TSource>(source, 0, count - 1);
}
private static IEnumerable<TSource> TakeRangeIterator<TSource>(IEnumerable<TSource> source, int startIndex, int endIndex)
@@ -25,15 +25,15 @@ namespace System.Linq
Debug.Assert(startIndex >= 0 && startIndex < endIndex);
return
- source is IPartition<TSource> partition ? TakePartitionRange(partition, startIndex, endIndex) :
- source is IList<TSource> sourceList ? new ListPartition<TSource>(sourceList, startIndex, endIndex - 1) :
- new EnumerablePartition<TSource>(source, startIndex, endIndex - 1);
+ source is Iterator<TSource> iterator ? TakeIteratorRange(iterator, startIndex, endIndex) :
+ source is IList<TSource> sourceList ? new IListSkipTakeIterator<TSource>(sourceList, startIndex, endIndex - 1) :
+ new IEnumerableSkipTakeIterator<TSource>(source, startIndex, endIndex - 1);
- static IEnumerable<TSource> TakePartitionRange(IPartition<TSource> partition, int startIndex, int endIndex)
+ static IEnumerable<TSource> TakeIteratorRange(Iterator<TSource> iterator, int startIndex, int endIndex)
{
- IPartition<TSource>? source;
+ Iterator<TSource>? source;
if (endIndex != 0 &&
- (source = partition.Take(endIndex)) is not null &&
+ (source = iterator.Take(endIndex)) is not null &&
(startIndex == 0 || (source = source!.Skip(startIndex)) is not null))
{
return source;
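TakeIteratorRange decomposes a range-based take into Take(endIndex) followed by Skip(startIndex). In terms of the public operators:

using System;
using System.Linq;

class TakeRangeDemo
{
    static void Main()
    {
        int[] source = { 10, 11, 12, 13, 14, 15 };

        // Take(2..5) is equivalent to Take(5).Skip(2): indexes 2, 3, 4.
        Console.WriteLine(string.Join(", ", source.Take(2..5)));      // 12, 13, 14
        Console.WriteLine(string.Join(", ", source.Take(5).Skip(2))); // 12, 13, 14
    }
}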
diff --git a/src/libraries/System.Linq/src/System/Linq/ToCollection.cs b/src/libraries/System.Linq/src/System/Linq/ToCollection.cs
index 043cac8f0038..afbf1d4e158a 100644
--- a/src/libraries/System.Linq/src/System/Linq/ToCollection.cs
+++ b/src/libraries/System.Linq/src/System/Linq/ToCollection.cs
@@ -16,10 +16,12 @@ namespace System.Linq
ThrowHelper.ThrowArgumentNullException(ExceptionArgument.source);
}
- if (source is IIListProvider<TSource> arrayProvider)
+#if !OPTIMIZE_FOR_SIZE
+ if (source is Iterator<TSource> iterator)
{
- return arrayProvider.ToArray();
+ return iterator.ToArray();
}
+#endif
if (source is ICollection<TSource> collection)
{
@@ -57,10 +59,12 @@ namespace System.Linq
ThrowHelper.ThrowArgumentNullException(ExceptionArgument.source);
}
- if (source is IIListProvider<TSource> listProvider)
+#if !OPTIMIZE_FOR_SIZE
+ if (source is Iterator<TSource> iterator)
{
- return listProvider.ToList();
+ return iterator.ToList();
}
+#endif
return new List<TSource>(source);
}
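The #if !OPTIMIZE_FOR_SIZE guards trade code size for speed: size-optimized builds compile out the Iterator<TSource> fast path and keep only the general ones. A minimal sketch of the same dispatch pattern using public types only (ToArraySketched is illustrative, not the library implementation):

using System;
using System.Collections.Generic;
using System.Linq;

static class ToArraySketch
{
    public static T[] ToArraySketched<T>(IEnumerable<T> source)
    {
        ArgumentNullException.ThrowIfNull(source);

        // Fast path: a collection knows its count, so allocate exactly once.
        if (source is ICollection<T> collection)
        {
            var result = new T[collection.Count];
            collection.CopyTo(result, 0);
            return result;
        }

        // Fallback: grow a buffer while enumerating.
        return new List<T>(source).ToArray();
    }

    static void Main()
    {
        Console.WriteLine(ToArraySketched(Enumerable.Range(0, 5).Where(i => i % 2 == 0)).Length); // 3
    }
}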
diff --git a/src/libraries/System.Linq/src/System/Linq/Union.SpeedOpt.cs b/src/libraries/System.Linq/src/System/Linq/Union.SpeedOpt.cs
index 6acf199e665b..0efa8248405b 100644
--- a/src/libraries/System.Linq/src/System/Linq/Union.SpeedOpt.cs
+++ b/src/libraries/System.Linq/src/System/Linq/Union.SpeedOpt.cs
@@ -7,7 +7,7 @@ namespace System.Linq
{
public static partial class Enumerable
{
- private abstract partial class UnionIterator<TSource> : IIListProvider<TSource>
+ private abstract partial class UnionIterator<TSource>
{
private HashSet<TSource> FillSet()
{
@@ -24,11 +24,27 @@ namespace System.Linq
}
}
- public TSource[] ToArray() => Enumerable.HashSetToArray(FillSet());
+ public override TSource[] ToArray() => HashSetToArray(FillSet());
- public List<TSource> ToList() => new List<TSource>(FillSet());
+ public override List<TSource> ToList() => new List<TSource>(FillSet());
- public int GetCount(bool onlyIfCheap) => onlyIfCheap ? -1 : FillSet().Count;
+ public override int GetCount(bool onlyIfCheap) => onlyIfCheap ? -1 : FillSet().Count;
+
+ public override TSource? TryGetFirst(out bool found)
+ {
+ IEnumerable<TSource>? source;
+ for (int i = 0; (source = GetEnumerable(i)) is not null; i++)
+ {
+ TSource? result = source.TryGetFirst(out found);
+ if (found)
+ {
+ return result;
+ }
+ }
+
+ found = false;
+ return default;
+ }
}
}
}
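The TryGetFirst added to UnionIterator relies on a simple observation: the first element a union yields has nothing before it to be a duplicate of, so no de-duplicating set is needed to answer First(). In public API terms:

using System;
using System.Linq;

class UnionFirstDemo
{
    static void Main()
    {
        int[] a = { 1, 1, 2 };
        int[] b = { 2, 3 };

        // Both print 1; no HashSet has to be populated to answer First().
        Console.WriteLine(a.Union(b).First());
        Console.WriteLine(a.Concat(b).Distinct().First());
    }
}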
diff --git a/src/libraries/System.Linq/src/System/Linq/Where.SpeedOpt.cs b/src/libraries/System.Linq/src/System/Linq/Where.SpeedOpt.cs
index 5bf0a7180883..d7adb1bd9917 100644
--- a/src/libraries/System.Linq/src/System/Linq/Where.SpeedOpt.cs
+++ b/src/libraries/System.Linq/src/System/Linq/Where.SpeedOpt.cs
@@ -8,9 +8,9 @@ namespace System.Linq
{
public static partial class Enumerable
{
- private sealed partial class WhereEnumerableIterator<TSource> : IPartition<TSource>
+ private sealed partial class IEnumerableWhereIterator<TSource>
{
- public int GetCount(bool onlyIfCheap)
+ public override int GetCount(bool onlyIfCheap)
{
if (onlyIfCheap)
{
@@ -33,7 +33,7 @@ namespace System.Linq
return count;
}
- public TSource[] ToArray()
+ public override TSource[] ToArray()
{
SegmentedArrayBuilder<TSource>.ScratchBuffer scratch = default;
SegmentedArrayBuilder<TSource> builder = new(scratch);
@@ -53,7 +53,7 @@ namespace System.Linq
return result;
}
- public List<TSource> ToList()
+ public override List<TSource> ToList()
{
var list = new List<TSource>();
@@ -69,7 +69,7 @@ namespace System.Linq
return list;
}
- public TSource? TryGetFirst(out bool found)
+ public override TSource? TryGetFirst(out bool found)
{
Func<TSource, bool> predicate = _predicate;
@@ -86,7 +86,7 @@ namespace System.Linq
return default;
}
- public TSource? TryGetLast(out bool found)
+ public override TSource? TryGetLast(out bool found)
{
using IEnumerator<TSource> e = _source.GetEnumerator();
@@ -121,7 +121,7 @@ namespace System.Linq
return default;
}
- public TSource? TryGetElementAt(int index, out bool found)
+ public override TSource? TryGetElementAt(int index, out bool found)
{
if (index >= 0)
{
@@ -145,15 +145,11 @@ namespace System.Linq
found = false;
return default;
}
-
- public IPartition<TSource>? Skip(int count) => new EnumerablePartition<TSource>(this, count, -1);
-
- public IPartition<TSource>? Take(int count) => new EnumerablePartition<TSource>(this, 0, count - 1);
}
- internal sealed partial class WhereArrayIterator<TSource> : IPartition<TSource>
+ internal sealed partial class ArrayWhereIterator<TSource>
{
- public int GetCount(bool onlyIfCheap) => GetCount(onlyIfCheap, _source, _predicate);
+ public override int GetCount(bool onlyIfCheap) => GetCount(onlyIfCheap, _source, _predicate);
public static int GetCount(bool onlyIfCheap, ReadOnlySpan<TSource> source, Func<TSource, bool> predicate)
{
@@ -178,7 +174,7 @@ namespace System.Linq
return count;
}
- public TSource[] ToArray() => ToArray(_source, _predicate);
+ public override TSource[] ToArray() => ToArray(_source, _predicate);
public static TSource[] ToArray(ReadOnlySpan<TSource> source, Func<TSource, bool> predicate)
{
@@ -199,7 +195,7 @@ namespace System.Linq
return result;
}
- public List<TSource> ToList() => ToList(_source, _predicate);
+ public override List<TSource> ToList() => ToList(_source, _predicate);
public static List<TSource> ToList(ReadOnlySpan<TSource> source, Func<TSource, bool> predicate)
{
@@ -216,7 +212,7 @@ namespace System.Linq
return list;
}
- public TSource? TryGetFirst(out bool found)
+ public override TSource? TryGetFirst(out bool found)
{
Func<TSource, bool> predicate = _predicate;
@@ -233,7 +229,7 @@ namespace System.Linq
return default;
}
- public TSource? TryGetLast(out bool found)
+ public override TSource? TryGetLast(out bool found)
{
TSource[] source = _source;
Func<TSource, bool> predicate = _predicate;
@@ -251,7 +247,7 @@ namespace System.Linq
return default;
}
- public TSource? TryGetElementAt(int index, out bool found)
+ public override TSource? TryGetElementAt(int index, out bool found)
{
if (index >= 0)
{
@@ -275,21 +271,17 @@ namespace System.Linq
found = false;
return default;
}
-
- public IPartition<TSource>? Skip(int count) => new EnumerablePartition<TSource>(this, count, -1);
-
- public IPartition<TSource>? Take(int count) => new EnumerablePartition<TSource>(this, 0, count - 1);
}
- private sealed partial class WhereListIterator<TSource> : Iterator<TSource>, IPartition<TSource>
+ private sealed partial class ListWhereIterator<TSource> : Iterator<TSource>
{
- public int GetCount(bool onlyIfCheap) => WhereArrayIterator<TSource>.GetCount(onlyIfCheap, CollectionsMarshal.AsSpan(_source), _predicate);
+ public override int GetCount(bool onlyIfCheap) => ArrayWhereIterator<TSource>.GetCount(onlyIfCheap, CollectionsMarshal.AsSpan(_source), _predicate);
- public TSource[] ToArray() => WhereArrayIterator<TSource>.ToArray(CollectionsMarshal.AsSpan(_source), _predicate);
+ public override TSource[] ToArray() => ArrayWhereIterator<TSource>.ToArray(CollectionsMarshal.AsSpan(_source), _predicate);
- public List<TSource> ToList() => WhereArrayIterator<TSource>.ToList(CollectionsMarshal.AsSpan(_source), _predicate);
+ public override List<TSource> ToList() => ArrayWhereIterator<TSource>.ToList(CollectionsMarshal.AsSpan(_source), _predicate);
- public TSource? TryGetFirst(out bool found)
+ public override TSource? TryGetFirst(out bool found)
{
Func<TSource, bool> predicate = _predicate;
@@ -306,7 +298,7 @@ namespace System.Linq
return default;
}
- public TSource? TryGetLast(out bool found)
+ public override TSource? TryGetLast(out bool found)
{
ReadOnlySpan<TSource> source = CollectionsMarshal.AsSpan(_source);
Func<TSource, bool> predicate = _predicate;
@@ -324,7 +316,7 @@ namespace System.Linq
return default;
}
- public TSource? TryGetElementAt(int index, out bool found)
+ public override TSource? TryGetElementAt(int index, out bool found)
{
if (index >= 0)
{
@@ -348,15 +340,11 @@ namespace System.Linq
found = false;
return default;
}
-
- public IPartition<TSource>? Skip(int count) => new EnumerablePartition<TSource>(this, count, -1);
-
- public IPartition<TSource>? Take(int count) => new EnumerablePartition<TSource>(this, 0, count - 1);
}
- private sealed partial class WhereSelectArrayIterator<TSource, TResult> : IPartition<TResult>
+ private sealed partial class ArrayWhereSelectIterator<TSource, TResult>
{
- public int GetCount(bool onlyIfCheap) => GetCount(onlyIfCheap, _source, _predicate, _selector);
+ public override int GetCount(bool onlyIfCheap) => GetCount(onlyIfCheap, _source, _predicate, _selector);
public static int GetCount(bool onlyIfCheap, ReadOnlySpan<TSource> source, Func<TSource, bool> predicate, Func<TSource, TResult> selector)
{
@@ -385,7 +373,7 @@ namespace System.Linq
return count;
}
- public TResult[] ToArray() => ToArray(_source, _predicate, _selector);
+ public override TResult[] ToArray() => ToArray(_source, _predicate, _selector);
public static TResult[] ToArray(ReadOnlySpan<TSource> source, Func<TSource, bool> predicate, Func<TSource, TResult> selector)
{
@@ -406,7 +394,7 @@ namespace System.Linq
return result;
}
- public List<TResult> ToList() => ToList(_source, _predicate, _selector);
+ public override List<TResult> ToList() => ToList(_source, _predicate, _selector);
public static List<TResult> ToList(ReadOnlySpan<TSource> source, Func<TSource, bool> predicate, Func<TSource, TResult> selector)
{
@@ -423,7 +411,7 @@ namespace System.Linq
return list;
}
- public TResult? TryGetFirst(out bool found) => TryGetFirst(_source, _predicate, _selector, out found);
+ public override TResult? TryGetFirst(out bool found) => TryGetFirst(_source, _predicate, _selector, out found);
public static TResult? TryGetFirst(ReadOnlySpan<TSource> source, Func<TSource, bool> predicate, Func<TSource, TResult> selector, out bool found)
{
@@ -440,7 +428,7 @@ namespace System.Linq
return default;
}
- public TResult? TryGetLast(out bool found) => TryGetLast(_source, _predicate, _selector, out found);
+ public override TResult? TryGetLast(out bool found) => TryGetLast(_source, _predicate, _selector, out found);
public static TResult? TryGetLast(ReadOnlySpan<TSource> source, Func<TSource, bool> predicate, Func<TSource, TResult> selector, out bool found)
{
@@ -457,7 +445,7 @@ namespace System.Linq
return default;
}
- public TResult? TryGetElementAt(int index, out bool found) => TryGetElementAt(_source, _predicate, _selector, index, out found);
+ public override TResult? TryGetElementAt(int index, out bool found) => TryGetElementAt(_source, _predicate, _selector, index, out found);
public static TResult? TryGetElementAt(ReadOnlySpan<TSource> source, Func<TSource, bool> predicate, Func<TSource, TResult> selector, int index, out bool found)
{
@@ -481,34 +469,26 @@ namespace System.Linq
found = false;
return default;
}
-
- public IPartition<TResult>? Skip(int count) => new EnumerablePartition<TResult>(this, count, -1);
-
- public IPartition<TResult>? Take(int count) => new EnumerablePartition<TResult>(this, 0, count - 1);
}
- private sealed partial class WhereSelectListIterator<TSource, TResult> : IPartition<TResult>
+ private sealed partial class ListWhereSelectIterator<TSource, TResult>
{
- public int GetCount(bool onlyIfCheap) => WhereSelectArrayIterator<TSource, TResult>.GetCount(onlyIfCheap, CollectionsMarshal.AsSpan(_source), _predicate, _selector);
-
- public TResult[] ToArray() => WhereSelectArrayIterator<TSource, TResult>.ToArray(CollectionsMarshal.AsSpan(_source), _predicate, _selector);
+ public override int GetCount(bool onlyIfCheap) => ArrayWhereSelectIterator<TSource, TResult>.GetCount(onlyIfCheap, CollectionsMarshal.AsSpan(_source), _predicate, _selector);
- public List<TResult> ToList() => WhereSelectArrayIterator<TSource, TResult>.ToList(CollectionsMarshal.AsSpan(_source), _predicate, _selector);
+ public override TResult[] ToArray() => ArrayWhereSelectIterator<TSource, TResult>.ToArray(CollectionsMarshal.AsSpan(_source), _predicate, _selector);
- public TResult? TryGetElementAt(int index, out bool found) => WhereSelectArrayIterator<TSource, TResult>.TryGetElementAt(CollectionsMarshal.AsSpan(_source), _predicate, _selector, index, out found);
+ public override List<TResult> ToList() => ArrayWhereSelectIterator<TSource, TResult>.ToList(CollectionsMarshal.AsSpan(_source), _predicate, _selector);
- public TResult? TryGetFirst(out bool found) => WhereSelectArrayIterator<TSource, TResult>.TryGetFirst(CollectionsMarshal.AsSpan(_source), _predicate, _selector, out found);
+ public override TResult? TryGetElementAt(int index, out bool found) => ArrayWhereSelectIterator<TSource, TResult>.TryGetElementAt(CollectionsMarshal.AsSpan(_source), _predicate, _selector, index, out found);
- public TResult? TryGetLast(out bool found) => WhereSelectArrayIterator<TSource, TResult>.TryGetLast(CollectionsMarshal.AsSpan(_source), _predicate, _selector, out found);
+ public override TResult? TryGetFirst(out bool found) => ArrayWhereSelectIterator<TSource, TResult>.TryGetFirst(CollectionsMarshal.AsSpan(_source), _predicate, _selector, out found);
- public IPartition<TResult>? Skip(int count) => new EnumerablePartition<TResult>(this, count, -1);
-
- public IPartition<TResult>? Take(int count) => new EnumerablePartition<TResult>(this, 0, count - 1);
+ public override TResult? TryGetLast(out bool found) => ArrayWhereSelectIterator<TSource, TResult>.TryGetLast(CollectionsMarshal.AsSpan(_source), _predicate, _selector, out found);
}
- private sealed partial class WhereSelectEnumerableIterator<TSource, TResult> : IPartition<TResult>
+ private sealed partial class IEnumerableWhereSelectIterator<TSource, TResult>
{
- public int GetCount(bool onlyIfCheap)
+ public override int GetCount(bool onlyIfCheap)
{
// In case someone uses Count() to force evaluation of
// the selector, run it provided `onlyIfCheap` is false.
@@ -535,7 +515,7 @@ namespace System.Linq
return count;
}
- public TResult[] ToArray()
+ public override TResult[] ToArray()
{
SegmentedArrayBuilder<TResult>.ScratchBuffer scratch = default;
SegmentedArrayBuilder<TResult> builder = new(scratch);
@@ -556,7 +536,7 @@ namespace System.Linq
return result;
}
- public List<TResult> ToList()
+ public override List<TResult> ToList()
{
var list = new List<TResult>();
@@ -573,7 +553,7 @@ namespace System.Linq
return list;
}
- public TResult? TryGetFirst(out bool found)
+ public override TResult? TryGetFirst(out bool found)
{
Func<TSource, bool> predicate = _predicate;
@@ -590,7 +570,7 @@ namespace System.Linq
return default;
}
- public TResult? TryGetLast(out bool found)
+ public override TResult? TryGetLast(out bool found)
{
using IEnumerator<TSource> e = _source.GetEnumerator();
@@ -625,7 +605,7 @@ namespace System.Linq
return default;
}
- public TResult? TryGetElementAt(int index, out bool found)
+ public override TResult? TryGetElementAt(int index, out bool found)
{
if (index >= 0)
{
@@ -649,10 +629,6 @@ namespace System.Linq
found = false;
return default;
}
-
- public IPartition<TResult>? Skip(int count) => new EnumerablePartition<TResult>(this, count, -1);
-
- public IPartition<TResult>? Take(int count) => new EnumerablePartition<TResult>(this, 0, count - 1);
}
}
}
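A convention that recurs through this file is GetCount(onlyIfCheap): report the count when it can be had without enumerating, otherwise return -1 if the caller only wanted a cheap answer. A stand-alone sketch of that contract — GetCountSketch is an illustrative name, not the internal API:

using System;
using System.Collections.Generic;
using System.Linq;

static class CheapCountDemo
{
    static int GetCountSketch<T>(IEnumerable<T> source, bool onlyIfCheap)
    {
        if (source is ICollection<T> c)
        {
            return c.Count;    // cheap: the collection stores its count
        }

        return onlyIfCheap
            ? -1               // refuse to pay for enumeration
            : source.Count();  // expensive: full enumeration

    }

    static void Main()
    {
        Console.WriteLine(GetCountSketch(new[] { 1, 2, 3 }, onlyIfCheap: true));                       // 3
        Console.WriteLine(GetCountSketch(Enumerable.Range(0, 3).Where(i => i > 0), onlyIfCheap: true));  // -1
        Console.WriteLine(GetCountSketch(Enumerable.Range(0, 3).Where(i => i > 0), onlyIfCheap: false)); // 2
    }
}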
diff --git a/src/libraries/System.Linq/src/System/Linq/Where.cs b/src/libraries/System.Linq/src/System/Linq/Where.cs
index aec6370a330f..a3a6656c43ba 100644
--- a/src/libraries/System.Linq/src/System/Linq/Where.cs
+++ b/src/libraries/System.Linq/src/System/Linq/Where.cs
@@ -33,15 +33,15 @@ namespace System.Linq
return [];
}
- return new WhereArrayIterator<TSource>(array, predicate);
+ return new ArrayWhereIterator<TSource>(array, predicate);
}
if (source is List<TSource> list)
{
- return new WhereListIterator<TSource>(list, predicate);
+ return new ListWhereIterator<TSource>(list, predicate);
}
- return new WhereEnumerableIterator<TSource>(source, predicate);
+ return new IEnumerableWhereIterator<TSource>(source, predicate);
}
public static IEnumerable<TSource> Where<TSource>(this IEnumerable<TSource> source, Func<TSource, int, bool> predicate)
@@ -85,13 +85,13 @@ namespace System.Linq
/// An iterator that filters each item of an <see cref="IEnumerable{TSource}"/>.
/// </summary>
/// <typeparam name="TSource">The type of the source enumerable.</typeparam>
- private sealed partial class WhereEnumerableIterator<TSource> : Iterator<TSource>
+ private sealed partial class IEnumerableWhereIterator<TSource> : Iterator<TSource>
{
private readonly IEnumerable<TSource> _source;
private readonly Func<TSource, bool> _predicate;
private IEnumerator<TSource>? _enumerator;
- public WhereEnumerableIterator(IEnumerable<TSource> source, Func<TSource, bool> predicate)
+ public IEnumerableWhereIterator(IEnumerable<TSource> source, Func<TSource, bool> predicate)
{
Debug.Assert(source != null);
Debug.Assert(predicate != null);
@@ -99,7 +99,7 @@ namespace System.Linq
_predicate = predicate;
}
- public override Iterator<TSource> Clone() => new WhereEnumerableIterator<TSource>(_source, _predicate);
+ public override Iterator<TSource> Clone() => new IEnumerableWhereIterator<TSource>(_source, _predicate);
public override void Dispose()
{
@@ -140,22 +140,22 @@ namespace System.Linq
}
public override IEnumerable<TResult> Select<TResult>(Func<TSource, TResult> selector) =>
- new WhereSelectEnumerableIterator<TSource, TResult>(_source, _predicate, selector);
+ new IEnumerableWhereSelectIterator<TSource, TResult>(_source, _predicate, selector);
public override IEnumerable<TSource> Where(Func<TSource, bool> predicate) =>
- new WhereEnumerableIterator<TSource>(_source, CombinePredicates(_predicate, predicate));
+ new IEnumerableWhereIterator<TSource>(_source, CombinePredicates(_predicate, predicate));
}
/// <summary>
/// An iterator that filters each item of an array.
/// </summary>
/// <typeparam name="TSource">The type of the source array.</typeparam>
- internal sealed partial class WhereArrayIterator<TSource> : Iterator<TSource>
+ internal sealed partial class ArrayWhereIterator<TSource> : Iterator<TSource>
{
private readonly TSource[] _source;
private readonly Func<TSource, bool> _predicate;
- public WhereArrayIterator(TSource[] source, Func<TSource, bool> predicate)
+ public ArrayWhereIterator(TSource[] source, Func<TSource, bool> predicate)
{
Debug.Assert(source != null && source.Length > 0);
Debug.Assert(predicate != null);
@@ -164,7 +164,7 @@ namespace System.Linq
}
public override Iterator<TSource> Clone() =>
- new WhereArrayIterator<TSource>(_source, _predicate);
+ new ArrayWhereIterator<TSource>(_source, _predicate);
public override bool MoveNext()
{
@@ -187,23 +187,23 @@ namespace System.Linq
}
public override IEnumerable<TResult> Select<TResult>(Func<TSource, TResult> selector) =>
- new WhereSelectArrayIterator<TSource, TResult>(_source, _predicate, selector);
+ new ArrayWhereSelectIterator<TSource, TResult>(_source, _predicate, selector);
public override IEnumerable<TSource> Where(Func<TSource, bool> predicate) =>
- new WhereArrayIterator<TSource>(_source, CombinePredicates(_predicate, predicate));
+ new ArrayWhereIterator<TSource>(_source, CombinePredicates(_predicate, predicate));
}
/// <summary>
/// An iterator that filters each item of a <see cref="List{TSource}"/>.
/// </summary>
/// <typeparam name="TSource">The type of the source list.</typeparam>
- private sealed partial class WhereListIterator<TSource> : Iterator<TSource>
+ private sealed partial class ListWhereIterator<TSource> : Iterator<TSource>
{
private readonly List<TSource> _source;
private readonly Func<TSource, bool> _predicate;
private List<TSource>.Enumerator _enumerator;
- public WhereListIterator(List<TSource> source, Func<TSource, bool> predicate)
+ public ListWhereIterator(List<TSource> source, Func<TSource, bool> predicate)
{
Debug.Assert(source != null);
Debug.Assert(predicate != null);
@@ -212,7 +212,7 @@ namespace System.Linq
}
public override Iterator<TSource> Clone() =>
- new WhereListIterator<TSource>(_source, _predicate);
+ new ListWhereIterator<TSource>(_source, _predicate);
public override bool MoveNext()
{
@@ -241,10 +241,10 @@ namespace System.Linq
}
public override IEnumerable<TResult> Select<TResult>(Func<TSource, TResult> selector) =>
- new WhereSelectListIterator<TSource, TResult>(_source, _predicate, selector);
+ new ListWhereSelectIterator<TSource, TResult>(_source, _predicate, selector);
public override IEnumerable<TSource> Where(Func<TSource, bool> predicate) =>
- new WhereListIterator<TSource>(_source, CombinePredicates(_predicate, predicate));
+ new ListWhereIterator<TSource>(_source, CombinePredicates(_predicate, predicate));
}
/// <summary>
@@ -252,13 +252,13 @@ namespace System.Linq
/// </summary>
/// <typeparam name="TSource">The type of the source array.</typeparam>
/// <typeparam name="TResult">The type of the mapped items.</typeparam>
- private sealed partial class WhereSelectArrayIterator<TSource, TResult> : Iterator<TResult>
+ private sealed partial class ArrayWhereSelectIterator<TSource, TResult> : Iterator<TResult>
{
private readonly TSource[] _source;
private readonly Func<TSource, bool> _predicate;
private readonly Func<TSource, TResult> _selector;
- public WhereSelectArrayIterator(TSource[] source, Func<TSource, bool> predicate, Func<TSource, TResult> selector)
+ public ArrayWhereSelectIterator(TSource[] source, Func<TSource, bool> predicate, Func<TSource, TResult> selector)
{
Debug.Assert(source != null && source.Length > 0);
Debug.Assert(predicate != null);
@@ -269,7 +269,7 @@ namespace System.Linq
}
public override Iterator<TResult> Clone() =>
- new WhereSelectArrayIterator<TSource, TResult>(_source, _predicate, _selector);
+ new ArrayWhereSelectIterator<TSource, TResult>(_source, _predicate, _selector);
public override bool MoveNext()
{
@@ -292,7 +292,7 @@ namespace System.Linq
}
public override IEnumerable<TResult2> Select<TResult2>(Func<TResult, TResult2> selector) =>
- new WhereSelectArrayIterator<TSource, TResult2>(_source, _predicate, CombineSelectors(_selector, selector));
+ new ArrayWhereSelectIterator<TSource, TResult2>(_source, _predicate, CombineSelectors(_selector, selector));
}
/// <summary>
@@ -300,14 +300,14 @@ namespace System.Linq
/// </summary>
/// <typeparam name="TSource">The type of the source list.</typeparam>
/// <typeparam name="TResult">The type of the mapped items.</typeparam>
- private sealed partial class WhereSelectListIterator<TSource, TResult> : Iterator<TResult>
+ private sealed partial class ListWhereSelectIterator<TSource, TResult> : Iterator<TResult>
{
private readonly List<TSource> _source;
private readonly Func<TSource, bool> _predicate;
private readonly Func<TSource, TResult> _selector;
private List<TSource>.Enumerator _enumerator;
- public WhereSelectListIterator(List<TSource> source, Func<TSource, bool> predicate, Func<TSource, TResult> selector)
+ public ListWhereSelectIterator(List<TSource> source, Func<TSource, bool> predicate, Func<TSource, TResult> selector)
{
Debug.Assert(source != null);
Debug.Assert(predicate != null);
@@ -318,7 +318,7 @@ namespace System.Linq
}
public override Iterator<TResult> Clone() =>
- new WhereSelectListIterator<TSource, TResult>(_source, _predicate, _selector);
+ new ListWhereSelectIterator<TSource, TResult>(_source, _predicate, _selector);
public override bool MoveNext()
{
@@ -347,7 +347,7 @@ namespace System.Linq
}
public override IEnumerable<TResult2> Select<TResult2>(Func<TResult, TResult2> selector) =>
- new WhereSelectListIterator<TSource, TResult2>(_source, _predicate, CombineSelectors(_selector, selector));
+ new ListWhereSelectIterator<TSource, TResult2>(_source, _predicate, CombineSelectors(_selector, selector));
}
/// <summary>
@@ -355,14 +355,14 @@ namespace System.Linq
/// </summary>
/// <typeparam name="TSource">The type of the source enumerable.</typeparam>
/// <typeparam name="TResult">The type of the mapped items.</typeparam>
- private sealed partial class WhereSelectEnumerableIterator<TSource, TResult> : Iterator<TResult>
+ private sealed partial class IEnumerableWhereSelectIterator<TSource, TResult> : Iterator<TResult>
{
private readonly IEnumerable<TSource> _source;
private readonly Func<TSource, bool> _predicate;
private readonly Func<TSource, TResult> _selector;
private IEnumerator<TSource>? _enumerator;
- public WhereSelectEnumerableIterator(IEnumerable<TSource> source, Func<TSource, bool> predicate, Func<TSource, TResult> selector)
+ public IEnumerableWhereSelectIterator(IEnumerable<TSource> source, Func<TSource, bool> predicate, Func<TSource, TResult> selector)
{
Debug.Assert(source != null);
Debug.Assert(predicate != null);
@@ -373,7 +373,7 @@ namespace System.Linq
}
public override Iterator<TResult> Clone() =>
- new WhereSelectEnumerableIterator<TSource, TResult>(_source, _predicate, _selector);
+ new IEnumerableWhereSelectIterator<TSource, TResult>(_source, _predicate, _selector);
public override void Dispose()
{
@@ -414,7 +414,7 @@ namespace System.Linq
}
public override IEnumerable<TResult2> Select<TResult2>(Func<TResult, TResult2> selector) =>
- new WhereSelectEnumerableIterator<TSource, TResult2>(_source, _predicate, CombineSelectors(_selector, selector));
+ new IEnumerableWhereSelectIterator<TSource, TResult2>(_source, _predicate, CombineSelectors(_selector, selector));
}
}
}
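The Select and Where overrides above are what make chained operators fuse: Where(p1).Where(p2) runs one iterator with a combined predicate, and Where(p).Select(s) runs a single where-select iterator, so each element crosses one MoveNext boundary instead of two. Results are identical to the unfused pipeline:

using System;
using System.Linq;

class FusionDemo
{
    static void Main()
    {
        int[] source = { 1, 2, 3, 4, 5, 6 };

        // Internally a single iterator with a combined predicate and selector.
        var fused = source.Where(i => i % 2 == 0).Where(i => i > 2).Select(i => i * 10);

        Console.WriteLine(string.Join(", ", fused)); // 40, 60
    }
}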
diff --git a/src/libraries/System.Linq/tests/AggregateByTests.cs b/src/libraries/System.Linq/tests/AggregateByTests.cs
index daae145f7755..6232ce24a6df 100644
--- a/src/libraries/System.Linq/tests/AggregateByTests.cs
+++ b/src/libraries/System.Linq/tests/AggregateByTests.cs
@@ -9,28 +9,42 @@ namespace System.Linq.Tests
public class AggregateByTests : EnumerableTests
{
[Fact]
+ public void Empty()
+ {
+ Assert.All(IdentityTransforms<int>(), transform =>
+ {
+ Assert.Equal(Enumerable.Empty<KeyValuePair<int, int>>(), transform(Enumerable.Empty<int>()).AggregateBy(i => i, i => i, (a, i) => a + i));
+ Assert.Equal(Enumerable.Empty<KeyValuePair<int, int>>(), transform(Enumerable.Empty<int>()).AggregateBy(i => i, 0, (a, i) => a + i));
+ });
+ }
+
+ [Fact]
public void AggregateBy_SourceNull_ThrowsArgumentNullException()
{
string[] first = null;
AssertExtensions.Throws<ArgumentNullException>("source", () => first.AggregateBy(x => x, string.Empty, (x, y) => x + y));
AssertExtensions.Throws<ArgumentNullException>("source", () => first.AggregateBy(x => x, string.Empty, (x, y) => x + y, new AnagramEqualityComparer()));
+ AssertExtensions.Throws<ArgumentNullException>("source", () => first.AggregateBy(x => x, x => x, (x, y) => x + y));
+ AssertExtensions.Throws<ArgumentNullException>("source", () => first.AggregateBy(x => x, x => x, (x, y) => x + y, new AnagramEqualityComparer()));
}
[Fact]
public void AggregateBy_KeySelectorNull_ThrowsArgumentNullException()
{
- string[] source = { };
+ string[] source = ["test"];
Func<string, string> keySelector = null;
AssertExtensions.Throws<ArgumentNullException>("keySelector", () => source.AggregateBy(keySelector, string.Empty, (x, y) => x + y));
AssertExtensions.Throws<ArgumentNullException>("keySelector", () => source.AggregateBy(keySelector, string.Empty, (x, y) => x + y, new AnagramEqualityComparer()));
+ AssertExtensions.Throws<ArgumentNullException>("keySelector", () => source.AggregateBy(keySelector, x => x, (x, y) => x + y));
+ AssertExtensions.Throws<ArgumentNullException>("keySelector", () => source.AggregateBy(keySelector, x => x, (x, y) => x + y, new AnagramEqualityComparer()));
}
[Fact]
public void AggregateBy_SeedSelectorNull_ThrowsArgumentNullException()
{
- string[] source = { };
+ string[] source = ["test"];
Func<string, string> seedSelector = null;
AssertExtensions.Throws<ArgumentNullException>("seedSelector", () => source.AggregateBy(x => x, seedSelector, (x, y) => x + y));
@@ -40,11 +54,13 @@ namespace System.Linq.Tests
[Fact]
public void AggregateBy_FuncNull_ThrowsArgumentNullException()
{
- string[] source = { };
+ string[] source = ["test"];
Func<string, string, string> func = null;
AssertExtensions.Throws<ArgumentNullException>("func", () => source.AggregateBy(x => x, string.Empty, func));
AssertExtensions.Throws<ArgumentNullException>("func", () => source.AggregateBy(x => x, string.Empty, func, new AnagramEqualityComparer()));
+ AssertExtensions.Throws<ArgumentNullException>("func", () => source.AggregateBy(x => x, x => x, func));
+ AssertExtensions.Throws<ArgumentNullException>("func", () => source.AggregateBy(x => x, x => x, func, new AnagramEqualityComparer()));
}
[Fact]
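Both AggregateBy overloads exercised above take a key selector and an accumulator; they differ in whether the seed is a single value or produced per key by a seed selector. A brief usage sketch (the sample data is illustrative):

using System;
using System.Linq;

class AggregateByDemo
{
    static void Main()
    {
        string[] words = { "apple", "avocado", "banana", "blueberry", "cherry" };

        // Constant seed: count words per first letter.
        foreach (var kvp in words.AggregateBy(w => w[0], 0, (count, _) => count + 1))
        {
            Console.WriteLine($"{kvp.Key}: {kvp.Value}"); // a: 2, b: 2, c: 1
        }

        // Seed selector: each key starts from its own seed.
        foreach (var kvp in words.AggregateBy(w => w[0], key => $"{key}:", (acc, w) => acc + " " + w))
        {
            Console.WriteLine(kvp.Value); // "a: apple avocado", "b: banana blueberry", "c: cherry"
        }
    }
}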
diff --git a/src/libraries/System.Linq/tests/AppendPrependTests.cs b/src/libraries/System.Linq/tests/AppendPrependTests.cs
index 9df154f46ff4..a42f1297bf09 100644
--- a/src/libraries/System.Linq/tests/AppendPrependTests.cs
+++ b/src/libraries/System.Linq/tests/AppendPrependTests.cs
@@ -263,5 +263,27 @@ namespace System.Linq.Tests
source = NumberRangeGuaranteedNotCollectionType(2, 2).Prepend(1).Prepend(0).Append(4).Append(5).RunOnce();
Assert.Equal(Enumerable.Range(0, 6), source.ToList());
}
+
+ [Fact]
+ public void AppendPrepend_First_Last_ElementAt()
+ {
+ Assert.Equal(42, new int[] { 42 }.Append(84).First());
+ Assert.Equal(42, new int[] { 84 }.Prepend(42).First());
+ Assert.Equal(84, new int[] { 42 }.Append(84).Last());
+ Assert.Equal(84, new int[] { 84 }.Prepend(42).Last());
+ Assert.Equal(42, new int[] { 42 }.Append(84).ElementAt(0));
+ Assert.Equal(42, new int[] { 84 }.Prepend(42).ElementAt(0));
+ Assert.Equal(84, new int[] { 42 }.Append(84).ElementAt(1));
+ Assert.Equal(84, new int[] { 84 }.Prepend(42).ElementAt(1));
+
+ Assert.Equal(42, NumberRangeGuaranteedNotCollectionType(42, 1).Append(84).First());
+ Assert.Equal(42, NumberRangeGuaranteedNotCollectionType(84, 1).Prepend(42).First());
+ Assert.Equal(84, NumberRangeGuaranteedNotCollectionType(42, 1).Append(84).Last());
+ Assert.Equal(84, NumberRangeGuaranteedNotCollectionType(84, 1).Prepend(42).Last());
+ Assert.Equal(42, NumberRangeGuaranteedNotCollectionType(42, 1).Append(84).ElementAt(0));
+ Assert.Equal(42, NumberRangeGuaranteedNotCollectionType(84, 1).Prepend(42).ElementAt(0));
+ Assert.Equal(84, NumberRangeGuaranteedNotCollectionType(42, 1).Append(84).ElementAt(1));
+ Assert.Equal(84, NumberRangeGuaranteedNotCollectionType(84, 1).Prepend(42).ElementAt(1));
+ }
}
}
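The new assertions pin down that Append/Prepend chains answer First/Last/ElementAt correctly for both collection-backed and purely lazy sources. The operators themselves are lazy and non-mutating:

using System;
using System.Linq;

class AppendPrependDemo
{
    static void Main()
    {
        int[] core = { 2, 3 };

        // Append/Prepend build a lazy wrapper; the original array is untouched.
        var extended = core.Prepend(1).Append(4);

        Console.WriteLine(string.Join(", ", extended)); // 1, 2, 3, 4
        Console.WriteLine(core.Length);                 // 2
    }
}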
diff --git a/src/libraries/System.Linq/tests/CastTests.cs b/src/libraries/System.Linq/tests/CastTests.cs
index 6577d2e72e35..6f120a13e3c1 100644
--- a/src/libraries/System.Linq/tests/CastTests.cs
+++ b/src/libraries/System.Linq/tests/CastTests.cs
@@ -1,7 +1,6 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
-using System;
using System.Collections.Generic;
using Xunit;
@@ -235,5 +234,101 @@ namespace System.Linq.Tests
var en = iterator as IEnumerator<string>;
Assert.False(en != null && en.MoveNext());
}
+
+ [Fact]
+ public void TargetTypeIsSourceType_Nop()
+ {
+ object[] values = new string[] { "hello", "world" };
+ Assert.Same(values, values.Cast<string>());
+ }
+
+ [Fact]
+ public void CastOnMultidimensionalArraySucceeds()
+ {
+ Array array = Array.CreateInstance(typeof(int), 2, 3);
+ for (int i = 0; i < 2; i++)
+ {
+ for (int j = 0; j < 3; j++)
+ {
+ array.SetValue(i * 3 + j, i, j);
+ }
+ }
+
+ int[] result = array.Cast<int>().ToArray();
+ for (int i = 0; i < 6; i++)
+ {
+ Assert.Equal(i, result[i]);
+ }
+ }
+
+ [Fact]
+ public void CastCountReturnsExpectedLength()
+ {
+ object[] objects = new object[] { "hello", "world" };
+ Assert.Equal(2, objects.Cast<string>().Count());
+ }
+
+ [Fact]
+ public void CastFirstReturnsFirstElement()
+ {
+ object[] objects = new object[] { "hello", "world" };
+ Assert.Equal("hello", objects.Cast<string>().First());
+ }
+
+ [Fact]
+ public void CastFirstOnEmptySequenceThrows()
+ {
+ object[] objects = Array.Empty<object>();
+ Assert.Throws<InvalidOperationException>(() => objects.Cast<string>().First());
+ }
+
+ [Fact]
+ public void CastLastReturnsLastElement()
+ {
+ object[] objects = new object[] { "hello", "world" };
+ Assert.Equal("world", objects.Cast<string>().Last());
+ }
+
+ [Fact]
+ public void CastElementAtReturnsExpectedElement()
+ {
+ object[] objects = new object[] { "hello", "world" };
+ Assert.Equal("world", objects.Cast<string>().ElementAt(1));
+ }
+
+ [Fact]
+ public void CastElementAtOutOfRangeThrows()
+ {
+ object[] objects = new object[] { "hello", "world" };
+ Assert.Throws<ArgumentOutOfRangeException>(() => objects.Cast<string>().ElementAt(2));
+ }
+
+ [Fact]
+ public void CastLastOnEmptySequenceThrows()
+ {
+ object[] objects = Array.Empty<object>();
+ Assert.Throws<InvalidOperationException>(() => objects.Cast<string>().Last());
+ }
+
+ [Fact]
+ public void CastSelectProcessesEachElement()
+ {
+ object[] objects = new object[] { "hello", "world!" };
+ Assert.Equal(new[] { 5, 6 }, objects.Cast<string>().Select(s => s.Length));
+ }
+
+ [Fact]
+ public void CastSkipSkipsElements()
+ {
+ object[] objects = new object[] { "hello", "there", "world" };
+ Assert.Equal(new[] { "world" }, objects.Cast<string>().Skip(2));
+ }
+
+ [Fact]
+ public void CastTakeTakesElements()
+ {
+ object[] objects = new object[] { "hello", "there", "world" };
+ Assert.Equal(new[] { "hello", "there" }, objects.Cast<string>().Take(2));
+ }
}
}
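The TargetTypeIsSourceType_Nop test relies on array covariance: a string[] is statically usable as object[] yet already implements IEnumerable<string>, so Cast<string> can hand back the very same instance rather than allocate a wrapper. For example:

using System;
using System.Linq;

class CastNopDemo
{
    static void Main()
    {
        // Array covariance: runtime type string[], static type object[].
        object[] values = new string[] { "hello", "world" };

        // Cast<string> sees the source already is an IEnumerable<string>.
        var cast = values.Cast<string>();
        Console.WriteLine(ReferenceEquals(values, cast)); // True, matching Assert.Same above
    }
}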
diff --git a/src/libraries/System.Linq/tests/ChunkTests.cs b/src/libraries/System.Linq/tests/ChunkTests.cs
index ee3486041927..f8cfc4de6b85 100644
--- a/src/libraries/System.Linq/tests/ChunkTests.cs
+++ b/src/libraries/System.Linq/tests/ChunkTests.cs
@@ -8,6 +8,12 @@ namespace System.Linq.Tests
public class ChunkTests : EnumerableTests
{
[Fact]
+ public void Empty()
+ {
+ Assert.Equal(Enumerable.Empty<int[]>(), Enumerable.Empty<int>().Chunk(4));
+ }
+
+ [Fact]
public void ThrowsOnNullSource()
{
int[] source = null;
diff --git a/src/libraries/System.Linq/tests/ConcatTests.cs b/src/libraries/System.Linq/tests/ConcatTests.cs
index 6209d846ff24..0435bba5d7f0 100644
--- a/src/libraries/System.Linq/tests/ConcatTests.cs
+++ b/src/libraries/System.Linq/tests/ConcatTests.cs
@@ -83,6 +83,55 @@ namespace System.Linq.Tests
VerifyEqualsWorker(expected, actual);
}
+ [Theory]
+ [MemberData(nameof(ArraySourcesData))]
+ [MemberData(nameof(SelectArraySourcesData))]
+ [MemberData(nameof(EnumerableSourcesData))]
+ [MemberData(nameof(NonCollectionSourcesData))]
+ [MemberData(nameof(ListSourcesData))]
+ [MemberData(nameof(ConcatOfConcatsData))]
+ [MemberData(nameof(ConcatWithSelfData))]
+ [MemberData(nameof(ChainedCollectionConcatData))]
+ [MemberData(nameof(AppendedPrependedConcatAlternationsData))]
+ public void First_Last_ElementAt(IEnumerable<int> _, IEnumerable<int> actual)
+ {
+ int count = actual.Count();
+ if (count == 0)
+ {
+ Assert.Throws<InvalidOperationException>(() => actual.First());
+ Assert.Throws<InvalidOperationException>(() => actual.Last());
+ Assert.Throws<ArgumentOutOfRangeException>(() => actual.ElementAt(0));
+ }
+ else
+ {
+ int first = actual.First();
+ int last = actual.Last();
+ int elementAt = actual.ElementAt(count / 2);
+
+ int enumeratedFirst = 0, enumeratedLast = 0, enumeratedElementAt = 0;
+ int i = 0;
+ foreach (int item in actual)
+ {
+ if (i == 0)
+ {
+ enumeratedFirst = item;
+ }
+
+ if (i == count / 2)
+ {
+ enumeratedElementAt = item;
+ }
+
+ enumeratedLast = item;
+ i++;
+ }
+
+ Assert.Equal(enumeratedFirst, first);
+ Assert.Equal(enumeratedLast, last);
+ Assert.Equal(enumeratedElementAt, elementAt);
+ }
+ }
+
private static void VerifyEqualsWorker<T>(IEnumerable<T> expected, IEnumerable<T> actual)
{
// Returns a list of functions that, when applied to enumerable, should return
diff --git a/src/libraries/System.Linq/tests/DefaultIfEmptyTests.cs b/src/libraries/System.Linq/tests/DefaultIfEmptyTests.cs
index b4e9e13f8042..843d2a7d9e42 100644
--- a/src/libraries/System.Linq/tests/DefaultIfEmptyTests.cs
+++ b/src/libraries/System.Linq/tests/DefaultIfEmptyTests.cs
@@ -105,5 +105,24 @@ namespace System.Linq.Tests
var en = iterator as IEnumerator<int>;
Assert.False(en != null && en.MoveNext());
}
+
+ [Fact]
+ public void First_Last_ElementAt()
+ {
+ IEnumerable<int> nonEmpty = Enumerable.Range(1, 3);
+ Assert.Equal(1, nonEmpty.First());
+ Assert.Equal(3, nonEmpty.Last());
+ Assert.Equal(1, nonEmpty.ElementAt(0));
+ Assert.Equal(2, nonEmpty.ElementAt(1));
+ Assert.Equal(3, nonEmpty.ElementAt(2));
+ Assert.Throws<ArgumentOutOfRangeException>(() => nonEmpty.ElementAt(-1));
+ Assert.Throws<ArgumentOutOfRangeException>(() => nonEmpty.ElementAt(4));
+
+ IEnumerable<int> empty = Enumerable.Empty<int>();
+ Assert.Equal(42, empty.DefaultIfEmpty(42).First());
+ Assert.Equal(42, empty.DefaultIfEmpty(42).Last());
+ Assert.Equal(42, empty.DefaultIfEmpty(42).ElementAt(0));
+ Assert.Throws<ArgumentOutOfRangeException>(() => empty.DefaultIfEmpty(42).ElementAt(1));
+ }
}
}
diff --git a/src/libraries/System.Linq/tests/DistinctTests.cs b/src/libraries/System.Linq/tests/DistinctTests.cs
index 7408e96ddb38..e9de987ee048 100644
--- a/src/libraries/System.Linq/tests/DistinctTests.cs
+++ b/src/libraries/System.Linq/tests/DistinctTests.cs
@@ -304,6 +304,12 @@ namespace System.Linq.Tests
public static IEnumerable<object[]> DistinctBy_TestData()
{
yield return WrapArgs(
+ source: Array.Empty<int>(),
+ keySelector: x => x,
+ comparer: null,
+ expected: Enumerable.Empty<int>());
+
+ yield return WrapArgs(
source: Enumerable.Range(0, 10),
keySelector: x => x,
comparer: null,
diff --git a/src/libraries/System.Linq/tests/GroupByTests.cs b/src/libraries/System.Linq/tests/GroupByTests.cs
index 4b8967a28a82..5036ebe9b026 100644
--- a/src/libraries/System.Linq/tests/GroupByTests.cs
+++ b/src/libraries/System.Linq/tests/GroupByTests.cs
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
-using System.Diagnostics;
using System.Reflection;
using Xunit;
@@ -864,5 +863,29 @@ namespace System.Linq.Tests
PropertyInfo key = grouptype.GetProperty("Key", BindingFlags.Instance | BindingFlags.Public);
Assert.NotNull(key);
}
+
+ [Fact]
+ public void MultipleIterationsOfSameEnumerable()
+ {
+ foreach (IEnumerable<IGrouping<int, int>> e1 in new[] { Enumerable.Range(0, 10).GroupBy(i => i), Enumerable.Range(0, 10).GroupBy(i => i, i => i) })
+ {
+ for (int trial = 0; trial < 3; trial++)
+ {
+ int count = 0;
+ foreach (IGrouping<int, int> g in e1) count++;
+ Assert.Equal(10, count);
+ }
+ }
+
+ foreach (IEnumerable<int> e2 in new[] { Enumerable.Range(0, 10).GroupBy(i => i, (i, e) => i), Enumerable.Range(0, 10).GroupBy(i => i, i => i, (i, e) => i) })
+ {
+ for (int trial = 0; trial < 3; trial++)
+ {
+ int count = 0;
+ foreach (int i in e2) count++;
+ Assert.Equal(10, count);
+ }
+ }
+ }
}
}
diff --git a/src/libraries/System.Linq/tests/IndexTests.cs b/src/libraries/System.Linq/tests/IndexTests.cs
index 4b08820fe0e3..0742569f787d 100644
--- a/src/libraries/System.Linq/tests/IndexTests.cs
+++ b/src/libraries/System.Linq/tests/IndexTests.cs
@@ -9,6 +9,12 @@ namespace System.Linq.Tests
public class IndexTests : EnumerableTests
{
[Fact]
+ public void Empty()
+ {
+ Assert.Empty(Enumerable.Empty<int>().Index());
+ }
+
+ [Fact]
public void Index_SourceIsNull_ArgumentNullExceptionThrown()
{
IEnumerable<int> source = null;
diff --git a/src/libraries/System.Linq/tests/MaxTests.cs b/src/libraries/System.Linq/tests/MaxTests.cs
index a1509855091d..bb70a14d684c 100644
--- a/src/libraries/System.Linq/tests/MaxTests.cs
+++ b/src/libraries/System.Linq/tests/MaxTests.cs
@@ -251,6 +251,8 @@ namespace System.Linq.Tests
{
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<float>().Max());
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<float>().Max(x => x));
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<float>()).Max());
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<float>()).Max(x => x));
Assert.Throws<InvalidOperationException>(() => Array.Empty<float>().Max());
Assert.Throws<InvalidOperationException>(() => new List<float>().Max());
}
@@ -331,6 +333,8 @@ namespace System.Linq.Tests
{
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<double>().Max());
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<double>().Max(x => x));
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<double>()).Max());
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<double>()).Max(x => x));
Assert.Throws<InvalidOperationException>(() => Array.Empty<double>().Max());
Assert.Throws<InvalidOperationException>(() => new List<double>().Max());
}
@@ -397,6 +401,8 @@ namespace System.Linq.Tests
{
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<decimal>().Max());
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<decimal>().Max(x => x));
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<decimal>()).Max());
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<decimal>()).Max(x => x));
Assert.Throws<InvalidOperationException>(() => Array.Empty<decimal>().Max());
Assert.Throws<InvalidOperationException>(() => new List<decimal>().Max(x => x));
}
@@ -622,6 +628,8 @@ namespace System.Linq.Tests
{
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<DateTime>().Max());
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<DateTime>().Max(i => i));
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<DateTime>()).Max());
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<DateTime>()).Max(i => i));
}
public static IEnumerable<object[]> Max_String_TestData()
@@ -888,6 +896,7 @@ namespace System.Linq.Tests
public void Max_Boolean_EmptySource_ThrowsInvalidOperationException()
{
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<bool>().Max());
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<bool>()).Max());
}
[Fact]
diff --git a/src/libraries/System.Linq/tests/MinTests.cs b/src/libraries/System.Linq/tests/MinTests.cs
index feca6994d066..0cc72fa43a10 100644
--- a/src/libraries/System.Linq/tests/MinTests.cs
+++ b/src/libraries/System.Linq/tests/MinTests.cs
@@ -136,6 +136,8 @@ namespace System.Linq.Tests
{
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<int>().Min());
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<int>().Min(x => x));
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<int>()).Min());
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<int>()).Min(x => x));
Assert.Throws<InvalidOperationException>(() => Array.Empty<int>().Min());
Assert.Throws<InvalidOperationException>(() => new List<int>().Min());
}
@@ -182,6 +184,8 @@ namespace System.Linq.Tests
{
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<long>().Min());
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<long>().Min(x => x));
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<long>()).Min());
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<long>()).Min(x => x));
Assert.Throws<InvalidOperationException>(() => Array.Empty<long>().Min());
Assert.Throws<InvalidOperationException>(() => new List<long>().Min());
}
@@ -250,6 +254,8 @@ namespace System.Linq.Tests
{
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<float>().Min());
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<float>().Min(x => x));
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<float>()).Min());
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<float>()).Min(x => x));
Assert.Throws<InvalidOperationException>(() => Array.Empty<float>().Min());
Assert.Throws<InvalidOperationException>(() => new List<float>().Min());
}
@@ -316,6 +322,8 @@ namespace System.Linq.Tests
{
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<double>().Min());
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<double>().Min(x => x));
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<double>()).Min());
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<double>()).Min(x => x));
Assert.Throws<InvalidOperationException>(() => Array.Empty<double>().Min());
Assert.Throws<InvalidOperationException>(() => new List<double>().Min());
}
@@ -355,6 +363,8 @@ namespace System.Linq.Tests
{
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<decimal>().Min());
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<decimal>().Min(x => x));
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<decimal>()).Min());
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<decimal>()).Min(x => x));
Assert.Throws<InvalidOperationException>(() => Array.Empty<decimal>().Min());
Assert.Throws<InvalidOperationException>(() => new List<decimal>().Min());
}
@@ -595,6 +605,8 @@ namespace System.Linq.Tests
{
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<DateTime>().Min());
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<DateTime>().Min(x => x));
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<DateTime>()).Min());
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<DateTime>()).Min(x => x));
Assert.Throws<InvalidOperationException>(() => Array.Empty<DateTime>().Min());
Assert.Throws<InvalidOperationException>(() => new List<DateTime>().Min());
}
@@ -858,6 +870,7 @@ namespace System.Linq.Tests
public void Min_Bool_EmptySource_ThrowsInvalidOperationException()
{
Assert.Throws<InvalidOperationException>(() => Enumerable.Empty<bool>().Min());
+ Assert.Throws<InvalidOperationException>(() => ForceNotCollection(Enumerable.Empty<bool>()).Min());
}
[Fact]
diff --git a/src/libraries/System.Linq/tests/OrderTests.cs b/src/libraries/System.Linq/tests/OrderTests.cs
index ed2dd9bfc876..dee76efe7382 100644
--- a/src/libraries/System.Linq/tests/OrderTests.cs
+++ b/src/libraries/System.Linq/tests/OrderTests.cs
@@ -196,6 +196,9 @@ namespace System.Linq.Tests
{
Assert.Equal(0, Enumerable.Range(0, 10).Shuffle().Order().First());
Assert.Equal(9, Enumerable.Range(0, 10).Shuffle().OrderDescending().First());
+
+ Assert.Equal(0, ForceNotCollection(Enumerable.Range(0, 10).Shuffle()).Order().First());
+ Assert.Equal(9, ForceNotCollection(Enumerable.Range(0, 10).Shuffle()).OrderDescending().First());
}
[Fact]
@@ -281,6 +284,9 @@ namespace System.Linq.Tests
{
Assert.Equal(9, Enumerable.Range(0, 10).Shuffle().Order().Last());
Assert.Equal(0, Enumerable.Range(0, 10).Shuffle().OrderDescending().Last());
+
+ Assert.Equal(9, ForceNotCollection(Enumerable.Range(0, 10).Shuffle()).Order().Last());
+ Assert.Equal(0, ForceNotCollection(Enumerable.Range(0, 10).Shuffle()).OrderDescending().Last());
}
[Fact]
@@ -308,6 +314,16 @@ namespace System.Linq.Tests
}
[Fact]
+ public void ElementAtOnOrdered()
+ {
+ Assert.Equal(4, Enumerable.Range(0, 10).Shuffle().Order().ElementAt(4));
+ Assert.Equal(5, Enumerable.Range(0, 10).Shuffle().OrderDescending().ElementAt(4));
+
+ Assert.Equal(4, ForceNotCollection(Enumerable.Range(0, 10).Shuffle()).Order().ElementAt(4));
+ Assert.Equal(5, ForceNotCollection(Enumerable.Range(0, 10).Shuffle()).OrderDescending().ElementAt(4));
+ }
+
+ [Fact]
public void EnumeratorDoesntContinue()
{
var enumerator = NumberRangeGuaranteedNotCollectionType(0, 3).Shuffle().Order().GetEnumerator();
diff --git a/src/libraries/System.Linq/tests/RangeTests.cs b/src/libraries/System.Linq/tests/RangeTests.cs
index 8421a66ba890..2e331cfee7ec 100644
--- a/src/libraries/System.Linq/tests/RangeTests.cs
+++ b/src/libraries/System.Linq/tests/RangeTests.cs
@@ -243,6 +243,7 @@ namespace System.Linq.Tests
Assert.Throws<NotSupportedException>(() => list.Insert(0, 42));
Assert.Throws<NotSupportedException>(() => list.Clear());
Assert.Throws<NotSupportedException>(() => list.Remove(42));
+ Assert.Throws<NotSupportedException>(() => list.RemoveAt(0));
Assert.Throws<NotSupportedException>(() => list[0] = 42);
AssertExtensions.Throws<ArgumentOutOfRangeException>("index", () => list[-1]);
AssertExtensions.Throws<ArgumentOutOfRangeException>("index", () => list[expected.Length]);
@@ -255,6 +256,8 @@ namespace System.Linq.Tests
Assert.False(list.Contains(expected[0] - 1));
Assert.False(list.Contains(expected[^1] + 1));
+ Assert.Equal(-1, list.IndexOf(expected[0] - 1));
+ Assert.Equal(-1, list.IndexOf(expected[^1] + 1));
Assert.All(expected, i => Assert.True(list.Contains(i)));
Assert.All(expected, i => Assert.Equal(Array.IndexOf(expected, i), list.IndexOf(i)));
for (int i = 0; i < expected.Length; i++)
diff --git a/src/libraries/System.Linq/tests/RepeatTests.cs b/src/libraries/System.Linq/tests/RepeatTests.cs
index 625dff376de3..df8eebda3569 100644
--- a/src/libraries/System.Linq/tests/RepeatTests.cs
+++ b/src/libraries/System.Linq/tests/RepeatTests.cs
@@ -255,6 +255,7 @@ namespace System.Linq.Tests
Assert.Throws<NotSupportedException>(() => list.Insert(0, 42));
Assert.Throws<NotSupportedException>(() => list.Clear());
Assert.Throws<NotSupportedException>(() => list.Remove(42));
+ Assert.Throws<NotSupportedException>(() => list.RemoveAt(0));
Assert.Throws<NotSupportedException>(() => list[0] = 42);
AssertExtensions.Throws<ArgumentOutOfRangeException>("index", () => list[-1]);
AssertExtensions.Throws<ArgumentOutOfRangeException>("index", () => list[expected.Length]);
@@ -267,6 +268,8 @@ namespace System.Linq.Tests
Assert.False(list.Contains(expected[0] - 1));
Assert.False(list.Contains(expected[^1] + 1));
+ Assert.Equal(-1, list.IndexOf(expected[0] - 1));
+ Assert.Equal(-1, list.IndexOf(expected[^1] + 1));
Assert.All(expected, i => Assert.True(list.Contains(i)));
Assert.All(expected, i => Assert.Equal(Array.IndexOf(expected, i), list.IndexOf(i)));
for (int i = 0; i < expected.Length; i++)
diff --git a/src/libraries/System.Linq/tests/SequenceEqualTests.cs b/src/libraries/System.Linq/tests/SequenceEqualTests.cs
index 380916550efd..7393d18947aa 100644
--- a/src/libraries/System.Linq/tests/SequenceEqualTests.cs
+++ b/src/libraries/System.Linq/tests/SequenceEqualTests.cs
@@ -1,8 +1,7 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
-using System;
-using System.Collections.Generic;
+using System.Collections.ObjectModel;
using Xunit;
namespace System.Linq.Tests
@@ -246,5 +245,31 @@ namespace System.Linq.Tests
}
}
}
+
+ [Fact]
+ public void ICollectionsCompareCorrectly()
+ {
+ Assert.True(new TestCollection<int>([]).SequenceEqual(new TestCollection<int>([])));
+ Assert.True(new TestCollection<int>([1]).SequenceEqual(new TestCollection<int>([1])));
+ Assert.True(new TestCollection<int>([1, 2, 3]).SequenceEqual(new TestCollection<int>([1, 2, 3])));
+
+ Assert.False(new TestCollection<int>([1, 2, 3, 4]).SequenceEqual(new TestCollection<int>([1, 2, 3])));
+ Assert.False(new TestCollection<int>([1, 2, 3]).SequenceEqual(new TestCollection<int>([1, 2, 3, 4])));
+ Assert.False(new TestCollection<int>([1, 2, 3]).SequenceEqual(new TestCollection<int>([1, 2, 4])));
+ Assert.False(new TestCollection<int>([-1, 2, 3]).SequenceEqual(new TestCollection<int>([-2, 2, 3])));
+ }
+
+ [Fact]
+ public void IListsCompareCorrectly()
+ {
+ Assert.True(new ReadOnlyCollection<int>([]).SequenceEqual(new ReadOnlyCollection<int>([])));
+ Assert.True(new ReadOnlyCollection<int>([1]).SequenceEqual(new ReadOnlyCollection<int>([1])));
+ Assert.True(new ReadOnlyCollection<int>([1, 2, 3]).SequenceEqual(new ReadOnlyCollection<int>([1, 2, 3])));
+
+ Assert.False(new ReadOnlyCollection<int>([1, 2, 3, 4]).SequenceEqual(new ReadOnlyCollection<int>([1, 2, 3])));
+ Assert.False(new ReadOnlyCollection<int>([1, 2, 3]).SequenceEqual(new ReadOnlyCollection<int>([1, 2, 3, 4])));
+ Assert.False(new ReadOnlyCollection<int>([1, 2, 3]).SequenceEqual(new ReadOnlyCollection<int>([1, 2, 4])));
+ Assert.False(new ReadOnlyCollection<int>([-1, 2, 3]).SequenceEqual(new ReadOnlyCollection<int>([-2, 2, 3])));
+ }
}
}
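These tests pin the collection fast paths of SequenceEqual: when both sides are collections with a known Count, a length mismatch can be reported without comparing any elements, and list-backed inputs can be compared by index. The observable behavior is unchanged:

using System;
using System.Linq;

class SequenceEqualDemo
{
    static void Main()
    {
        int[] a = { 1, 2, 3 };
        int[] b = { 1, 2, 3, 4 };

        // Count mismatch (3 vs 4) can short-circuit before any element compare.
        Console.WriteLine(a.SequenceEqual(b));                  // False
        Console.WriteLine(a.SequenceEqual(new[] { 1, 2, 3 })); // True
    }
}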
diff --git a/src/libraries/System.Linq/tests/SkipLastTests.cs b/src/libraries/System.Linq/tests/SkipLastTests.cs
index fe9652a875e9..c4770410870d 100644
--- a/src/libraries/System.Linq/tests/SkipLastTests.cs
+++ b/src/libraries/System.Linq/tests/SkipLastTests.cs
@@ -9,6 +9,12 @@ namespace System.Linq.Tests
{
public class SkipLastTests : EnumerableTests
{
+ [Fact]
+ public void SkipLastThrowsOnNull()
+ {
+ AssertExtensions.Throws<ArgumentNullException>("source", () => ((IEnumerable<int>)null).SkipLast(10));
+ }
+
[Theory]
[MemberData(nameof(EnumerableData), MemberType = typeof(SkipTakeData))]
public void SkipLast(IEnumerable<int> source, int count)
diff --git a/src/libraries/System.Linq/tests/SkipWhileTests.cs b/src/libraries/System.Linq/tests/SkipWhileTests.cs
index 26281efc5f86..75a909670428 100644
--- a/src/libraries/System.Linq/tests/SkipWhileTests.cs
+++ b/src/libraries/System.Linq/tests/SkipWhileTests.cs
@@ -1,16 +1,21 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
-using System.Collections;
using System.Collections.Generic;
using Xunit;
-using Xunit.Abstractions;
namespace System.Linq.Tests
{
public class SkipWhileTests : EnumerableTests
{
[Fact]
+ public void Empty()
+ {
+ Assert.Equal(Enumerable.Empty<int>(), Enumerable.Empty<int>().SkipWhile(i => i < 40));
+ Assert.Equal(Enumerable.Empty<int>(), Enumerable.Empty<int>().SkipWhile((i, index) => i < 40));
+ }
+
+ [Fact]
public void SkipWhileAllTrue()
{
Assert.Equal(Enumerable.Empty<int>(), Enumerable.Range(0, 20).SkipWhile(i => i < 40));
diff --git a/src/libraries/System.Linq/tests/TakeLastTests.cs b/src/libraries/System.Linq/tests/TakeLastTests.cs
index 31b58d5bf017..b39d59e94263 100644
--- a/src/libraries/System.Linq/tests/TakeLastTests.cs
+++ b/src/libraries/System.Linq/tests/TakeLastTests.cs
@@ -9,6 +9,12 @@ namespace System.Linq.Tests
{
public class TakeLastTests : EnumerableTests
{
+ [Fact]
+        public void TakeLastThrowsOnNull()
+ {
+ AssertExtensions.Throws<ArgumentNullException>("source", () => ((IEnumerable<int>)null).TakeLast(10));
+ }
+
[Theory]
[MemberData(nameof(EnumerableData), MemberType = typeof(SkipTakeData))]
public void TakeLast(IEnumerable<int> source, int count)
diff --git a/src/libraries/System.Linq/tests/TakeTests.cs b/src/libraries/System.Linq/tests/TakeTests.cs
index 93a0405bfaf3..b19c69ab0fd9 100644
--- a/src/libraries/System.Linq/tests/TakeTests.cs
+++ b/src/libraries/System.Linq/tests/TakeTests.cs
@@ -2,6 +2,7 @@
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
+using System.Collections.ObjectModel;
using Xunit;
namespace System.Linq.Tests
@@ -2031,5 +2032,37 @@ namespace System.Linq.Tests
Assert.Empty(EnumerablePartitionOrEmpty(source).Take(3..^8));
Assert.Empty(EnumerablePartitionOrEmpty(source).Take(^6..^7));
}
+
+ [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsSpeedOptimized))]
+ public void SkipTakeOnIListIsIList()
+ {
+ IList<int> list = new ReadOnlyCollection<int>(Enumerable.Range(0, 100).ToList());
+ IList<int> skipTake = Assert.IsAssignableFrom<IList<int>>(list.Skip(10).Take(20));
+
+ Assert.True(skipTake.IsReadOnly);
+ Assert.Equal(20, skipTake.Count);
+ int[] results = new int[20];
+ skipTake.CopyTo(results, 0);
+ for (int i = 0; i < 20; i++)
+ {
+ Assert.Equal(i + 10, skipTake[i]);
+ Assert.Equal(i + 10, results[i]);
+ Assert.True(skipTake.Contains(i + 10));
+                Assert.Equal(i, skipTake.IndexOf(i + 10));
+ }
+
+ Assert.False(skipTake.Contains(9));
+ Assert.False(skipTake.Contains(30));
+
+ Assert.Throws<ArgumentOutOfRangeException>(() => skipTake[-1]);
+ Assert.Throws<ArgumentOutOfRangeException>(() => skipTake[20]);
+
+ Assert.Throws<NotSupportedException>(() => skipTake.Add(42));
+ Assert.Throws<NotSupportedException>(() => skipTake.Clear());
+ Assert.Throws<NotSupportedException>(() => skipTake.Insert(0, 42));
+ Assert.Throws<NotSupportedException>(() => skipTake.Remove(42));
+ Assert.Throws<NotSupportedException>(() => skipTake.RemoveAt(0));
+ Assert.Throws<NotSupportedException>(() => skipTake[0] = 42);
+ }
}
}
diff --git a/src/libraries/System.Linq/tests/TakeWhileTests.cs b/src/libraries/System.Linq/tests/TakeWhileTests.cs
index 55f02459978a..18589d97192b 100644
--- a/src/libraries/System.Linq/tests/TakeWhileTests.cs
+++ b/src/libraries/System.Linq/tests/TakeWhileTests.cs
@@ -1,7 +1,6 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
-using System.Collections;
using System.Collections.Generic;
using Xunit;
@@ -10,6 +9,13 @@ namespace System.Linq.Tests
public class TakeWhileTests : EnumerableTests
{
[Fact]
+ public void Empty()
+ {
+ Assert.Equal(Enumerable.Empty<int>(), Enumerable.Empty<int>().TakeWhile(i => i < 40));
+ Assert.Equal(Enumerable.Empty<int>(), Enumerable.Empty<int>().TakeWhile((i, index) => i < 40));
+ }
+
+ [Fact]
public void SameResultsRepeatCallsIntQuery()
{
var q = from x in new[] { 9999, 0, 888, -1, 66, -777, 1, 2, -12345 }
diff --git a/src/libraries/System.Linq/tests/ToLookupTests.cs b/src/libraries/System.Linq/tests/ToLookupTests.cs
index 458aaa9e4dde..d2f81e9a662d 100644
--- a/src/libraries/System.Linq/tests/ToLookupTests.cs
+++ b/src/libraries/System.Linq/tests/ToLookupTests.cs
@@ -1,10 +1,8 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
-using System;
+using System.Collections;
using System.Collections.Generic;
-using System.Diagnostics;
-using System.Reflection;
using Xunit;
namespace System.Linq.Tests
@@ -54,6 +52,14 @@ namespace System.Linq.Tests
}
[Fact]
+ public void Empty()
+ {
+ AssertMatches(Enumerable.Empty<int>(), Enumerable.Empty<int>(), Enumerable.Empty<int>().ToLookup(i => i));
+ Assert.False(Enumerable.Empty<int>().ToLookup(i => i).Contains(0));
+ Assert.Empty(Enumerable.Empty<int>().ToLookup(i => i)[0]);
+ }
+
+ [Fact]
public void NullKeyIncluded()
{
string[] key = { "Chris", "Bob", null, "Tim" };
@@ -289,6 +295,59 @@ namespace System.Linq.Tests
Assert.Equal(expected, result);
}
+ [Fact]
+ public void ApplyResultSelector()
+ {
+ Lookup<int, int> lookup = (Lookup<int, int>)new int[] { 1, 2, 2, 3, 3, 3 }.ToLookup(i => i);
+ IEnumerable<int> sums = lookup.ApplyResultSelector((key, elements) =>
+ {
+ Assert.Equal(key, elements.Count());
+ return elements.Sum();
+ });
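+            // The groups are 1 -> {1}, 2 -> {2, 2}, 3 -> {3, 3, 3}: each key equals its group size, and the group sums are 1, 4 and 9.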
+ Assert.Equal([1, 4, 9], sums);
+ }
+
+ [Theory]
+ [InlineData(0)]
+ [InlineData(1)]
+ [InlineData(10)]
+ public void LookupImplementsICollection(int count)
+ {
+ Assert.IsAssignableFrom<ICollection<IGrouping<string, int>>>(Enumerable.Range(0, count).ToLookup(i => i.ToString()));
+ Assert.IsAssignableFrom<ICollection<IGrouping<string, int>>>(Enumerable.Range(0, count).ToLookup(i => i.ToString(), StringComparer.OrdinalIgnoreCase));
+ Assert.IsAssignableFrom<ICollection<IGrouping<string, int>>>(Enumerable.Range(0, count).ToLookup(i => i.ToString(), i => i));
+ Assert.IsAssignableFrom<ICollection<IGrouping<string, int>>>(Enumerable.Range(0, count).ToLookup(i => i.ToString(), i => i, StringComparer.OrdinalIgnoreCase));
+
+ var collection = (ICollection<IGrouping<string, int>>)Enumerable.Range(0, count).ToLookup(i => i.ToString());
+ Assert.Equal(count, collection.Count);
+ Assert.True(collection.IsReadOnly);
+ Assert.Throws<NotSupportedException>(() => collection.Add(null));
+ Assert.Throws<NotSupportedException>(() => collection.Remove(null));
+ Assert.Throws<NotSupportedException>(() => collection.Clear());
+
+ if (count > 0)
+ {
+ IGrouping<string, int> first = collection.First();
+ IGrouping<string, int> last = collection.Last();
+ Assert.True(collection.Contains(first));
+ Assert.True(collection.Contains(last));
+ }
+ Assert.False(collection.Contains(new NopGrouping()));
+
+ IGrouping<string, int>[] items = new IGrouping<string, int>[count];
+ collection.CopyTo(items, 0);
+ Assert.Equal(collection.Select(i => i), items);
+ Assert.Equal(items, Enumerable.Range(0, count).ToLookup(i => i.ToString()).ToArray());
+ Assert.Equal(items, Enumerable.Range(0, count).ToLookup(i => i.ToString()).ToList());
+ }
+
+ private sealed class NopGrouping : IGrouping<string, int>
+ {
+ public string Key => "";
+ public IEnumerator<int> GetEnumerator() => ((IList<int>)Array.Empty<int>()).GetEnumerator();
+ IEnumerator IEnumerable.GetEnumerator() => GetEnumerator();
+ }
+
public class Membership
{
public int Id { get; set; }
diff --git a/src/libraries/System.Linq/tests/WhereTests.cs b/src/libraries/System.Linq/tests/WhereTests.cs
index a6bc625e9e49..a25b3f1abf9c 100644
--- a/src/libraries/System.Linq/tests/WhereTests.cs
+++ b/src/libraries/System.Linq/tests/WhereTests.cs
@@ -1094,6 +1094,50 @@ namespace System.Linq.Tests
}
}
+ [Fact]
+ public void WhereFirstLast()
+ {
+ Assert.All(IdentityTransforms<int>(), transform =>
+ {
+ IEnumerable<int> data = transform(Enumerable.Range(0, 10));
+
+ Assert.Equal(3, data.Where(i => i == 3).First());
+ Assert.Equal(0, data.Where(i => i % 2 == 0).First());
+
+ Assert.Equal(3, data.Where(i => i == 3).Last());
+ Assert.Equal(8, data.Where(i => i % 2 == 0).Last());
+
+ Assert.Equal(3, data.Where(i => i == 3).ElementAt(0));
+ Assert.Equal(8, data.Where(i => i % 2 == 0).ElementAt(4));
+
+ Assert.Throws<InvalidOperationException>(() => data.Where(i => i == 10).First());
+ Assert.Throws<InvalidOperationException>(() => data.Where(i => i == 10).Last());
+ Assert.Throws<ArgumentOutOfRangeException>(() => data.Where(i => i == 10).ElementAt(0));
+ });
+ }
+
+ [Fact]
+ public void WhereSelectFirstLast()
+ {
+ Assert.All(IdentityTransforms<int>(), transform =>
+ {
+ IEnumerable<int> data = transform(Enumerable.Range(0, 10));
+
+ Assert.Equal(6, data.Where(i => i == 3).Select(i => i * 2).First());
+ Assert.Equal(0, data.Where(i => i % 2 == 0).Select(i => i * 2).First());
+
+ Assert.Equal(6, data.Where(i => i == 3).Select(i => i * 2).Last());
+ Assert.Equal(16, data.Where(i => i % 2 == 0).Select(i => i * 2).Last());
+
+ Assert.Equal(6, data.Where(i => i == 3).Select(i => i * 2).ElementAt(0));
+ Assert.Equal(16, data.Where(i => i % 2 == 0).Select(i => i * 2).ElementAt(4));
+
+ Assert.Throws<InvalidOperationException>(() => data.Where(i => i == 10).Select(i => i * 2).First());
+ Assert.Throws<InvalidOperationException>(() => data.Where(i => i == 10).Select(i => i * 2).Last());
+ Assert.Throws<ArgumentOutOfRangeException>(() => data.Where(i => i == 10).Select(i => i * 2).ElementAt(0));
+ });
+ }
+
public static IEnumerable<object[]> ToCollectionData()
{
IEnumerable<int> seq = GenerateRandomSequnce(seed: 0xdeadbeef, count: 10);
diff --git a/src/libraries/System.Management/src/System/Management/ManagementDateTime.cs b/src/libraries/System.Management/src/System/Management/ManagementDateTime.cs
index 34860c2ff7e2..2ba36e619f18 100644
--- a/src/libraries/System.Management/src/System/Management/ManagementDateTime.cs
+++ b/src/libraries/System.Management/src/System/Management/ManagementDateTime.cs
@@ -193,7 +193,7 @@ namespace System.Management
throw new ArgumentOutOfRangeException(nameof(dmtfDate));
}
-
+            // codeql[cs/leap-year/unsafe-date-construction-from-two-elements] - The DateTime is not constructed from multiple elements - it is parsed from a string whose defaults are the stable DateTime.MinValue. Throwing on an invalid combination would be intentional.
var datetime = new DateTime(year, month, day, hour, minute, second, 0, DateTimeKind.Local);
// Then add the ticks calculated from the microseconds
datetime = datetime.AddTicks(ticks);
diff --git a/src/libraries/System.Net.Http/src/System/Net/Http/BrowserHttpHandler/BrowserHttpHandler.cs b/src/libraries/System.Net.Http/src/System/Net/Http/BrowserHttpHandler/BrowserHttpHandler.cs
index bbcd625d036d..050913e6f868 100644
--- a/src/libraries/System.Net.Http/src/System/Net/Http/BrowserHttpHandler/BrowserHttpHandler.cs
+++ b/src/libraries/System.Net.Http/src/System/Net/Http/BrowserHttpHandler/BrowserHttpHandler.cs
@@ -511,7 +511,6 @@ namespace System.Net.Http
public override async ValueTask<int> ReadAsync(Memory<byte> buffer, CancellationToken cancellationToken)
{
- ArgumentNullException.ThrowIfNull(buffer, nameof(buffer));
_controller.ThrowIfDisposed();
MemoryHandle pinBuffer = buffer.Pin();
diff --git a/src/libraries/System.Net.Primitives/src/System/Net/IPAddress.cs b/src/libraries/System.Net.Primitives/src/System/Net/IPAddress.cs
index bdd381fc0490..48f424407b40 100644
--- a/src/libraries/System.Net.Primitives/src/System/Net/IPAddress.cs
+++ b/src/libraries/System.Net.Primitives/src/System/Net/IPAddress.cs
@@ -142,7 +142,6 @@ namespace System.Net
internal IPAddress(ReadOnlySpan<ushort> numbers, uint scopeid)
{
- Debug.Assert(numbers != null);
Debug.Assert(numbers.Length == NumberOfLabels);
_numbers = numbers.ToArray();
diff --git a/src/libraries/System.Net.Primitives/src/System/Net/IPAddressParser.cs b/src/libraries/System.Net.Primitives/src/System/Net/IPAddressParser.cs
index 964cd4308366..afe80e3cd5b8 100644
--- a/src/libraries/System.Net.Primitives/src/System/Net/IPAddressParser.cs
+++ b/src/libraries/System.Net.Primitives/src/System/Net/IPAddressParser.cs
@@ -67,7 +67,6 @@ namespace System.Net
private static unsafe bool TryParseIPv6(ReadOnlySpan<char> ipSpan, Span<ushort> numbers, int numbersLength, out uint scope)
{
- Debug.Assert(numbers != null);
Debug.Assert(numbersLength >= IPAddressParserStatics.IPv6AddressShorts);
int end = ipSpan.Length;
diff --git a/src/libraries/System.Net.Quic/src/System/Net/Quic/Internal/MsQuicApi.NativeMethods.cs b/src/libraries/System.Net.Quic/src/System/Net/Quic/Internal/MsQuicApi.NativeMethods.cs
index 206eac76ac78..6906392f79eb 100644
--- a/src/libraries/System.Net.Quic/src/System/Net/Quic/Internal/MsQuicApi.NativeMethods.cs
+++ b/src/libraries/System.Net.Quic/src/System/Net/Quic/Internal/MsQuicApi.NativeMethods.cs
@@ -375,4 +375,55 @@ internal sealed unsafe partial class MsQuicApi
}
}
}
+
+ public int DatagramSend(MsQuicSafeHandle connection, QUIC_BUFFER* buffers, uint buffersCount, QUIC_SEND_FLAGS flags, void* context)
+ {
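+        // Keep the connection's native handle alive (DangerousAddRef/DangerousRelease) while the native call runs.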
+ bool success = false;
+ try
+ {
+ connection.DangerousAddRef(ref success);
+ return ApiTable->DatagramSend(connection.QuicHandle, buffers, buffersCount, flags, context);
+ }
+ finally
+ {
+ if (success)
+ {
+ connection.DangerousRelease();
+ }
+ }
+ }
+
+ public int ConnectionResumptionTicketValidationComplete(MsQuicSafeHandle connection, byte result)
+ {
+ bool success = false;
+ try
+ {
+ connection.DangerousAddRef(ref success);
+ return ApiTable->ConnectionResumptionTicketValidationComplete(connection.QuicHandle, result);
+ }
+ finally
+ {
+ if (success)
+ {
+ connection.DangerousRelease();
+ }
+ }
+ }
+
+ public int ConnectionCertificateValidationComplete(MsQuicSafeHandle connection, byte result, QUIC_TLS_ALERT_CODES alert)
+ {
+ bool success = false;
+ try
+ {
+ connection.DangerousAddRef(ref success);
+ return ApiTable->ConnectionCertificateValidationComplete(connection.QuicHandle, result, alert);
+ }
+ finally
+ {
+ if (success)
+ {
+ connection.DangerousRelease();
+ }
+ }
+ }
}
diff --git a/src/libraries/System.Net.Quic/src/System/Net/Quic/Internal/MsQuicApi.cs b/src/libraries/System.Net.Quic/src/System/Net/Quic/Internal/MsQuicApi.cs
index e89119844c74..4b284284f526 100644
--- a/src/libraries/System.Net.Quic/src/System/Net/Quic/Internal/MsQuicApi.cs
+++ b/src/libraries/System.Net.Quic/src/System/Net/Quic/Internal/MsQuicApi.cs
@@ -54,11 +54,16 @@ internal sealed unsafe partial class MsQuicApi
private static readonly Lazy<MsQuicApi> _api = new Lazy<MsQuicApi>(AllocateMsQuicApi);
internal static MsQuicApi Api => _api.Value;
+ internal static Version? Version { get; private set; }
+
internal static bool IsQuicSupported { get; }
internal static string MsQuicLibraryVersion { get; } = "unknown";
internal static string? NotSupportedReason { get; }
+ // workaround for https://github.com/microsoft/msquic/issues/4132
+ internal static bool SupportsAsyncCertValidation => Version >= new Version(2, 4, 0);
+
internal static bool UsesSChannelBackend { get; }
internal static bool Tls13ServerMayBeDisabled { get; }
@@ -69,6 +74,7 @@ internal sealed unsafe partial class MsQuicApi
{
bool loaded = false;
IntPtr msQuicHandle;
+ Version = default;
// MsQuic is using DualMode sockets and that will fail even for IPv4 if AF_INET6 is not available.
if (!Socket.OSSupportsIPv6)
@@ -135,7 +141,7 @@ internal sealed unsafe partial class MsQuicApi
}
return;
}
- Version version = new Version((int)libVersion[0], (int)libVersion[1], (int)libVersion[2], (int)libVersion[3]);
+ Version = new Version((int)libVersion[0], (int)libVersion[1], (int)libVersion[2], (int)libVersion[3]);
paramSize = 64 * sizeof(sbyte);
sbyte* libGitHash = stackalloc sbyte[64];
@@ -150,11 +156,11 @@ internal sealed unsafe partial class MsQuicApi
}
string? gitHash = Marshal.PtrToStringUTF8((IntPtr)libGitHash);
- MsQuicLibraryVersion = $"{Interop.Libraries.MsQuic} {version} ({gitHash})";
+ MsQuicLibraryVersion = $"{Interop.Libraries.MsQuic} {Version} ({gitHash})";
- if (version < s_minMsQuicVersion)
+ if (Version < s_minMsQuicVersion)
{
- NotSupportedReason = $"Incompatible MsQuic library version '{version}', expecting higher than '{s_minMsQuicVersion}'.";
+ NotSupportedReason = $"Incompatible MsQuic library version '{Version}', expecting higher than '{s_minMsQuicVersion}'.";
if (NetEventSource.Log.IsEnabled())
{
NetEventSource.Info(null, NotSupportedReason);
diff --git a/src/libraries/System.Net.Quic/src/System/Net/Quic/QuicConnection.SslConnectionOptions.cs b/src/libraries/System.Net.Quic/src/System/Net/Quic/QuicConnection.SslConnectionOptions.cs
index dad23bfc342c..1b352f100454 100644
--- a/src/libraries/System.Net.Quic/src/System/Net/Quic/QuicConnection.SslConnectionOptions.cs
+++ b/src/libraries/System.Net.Quic/src/System/Net/Quic/QuicConnection.SslConnectionOptions.cs
@@ -1,10 +1,13 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
+using System.Buffers;
+using System.Diagnostics;
using System.Net.Security;
using System.Security.Authentication;
using System.Security.Cryptography;
using System.Security.Cryptography.X509Certificates;
+using System.Threading.Tasks;
using Microsoft.Quic;
using static Microsoft.Quic.MsQuic;
@@ -63,18 +66,122 @@ public partial class QuicConnection
_certificateChainPolicy = certificateChainPolicy;
}
- public unsafe int ValidateCertificate(QUIC_BUFFER* certificatePtr, QUIC_BUFFER* chainPtr, out X509Certificate2? certificate)
+ internal async Task<bool> StartAsyncCertificateValidation(IntPtr certificatePtr, IntPtr chainPtr)
+ {
+ //
+        // The provided data pointers are valid only while still inside this function, so the data needs to be
+        // copied to separate buffers which are then handed off to the thread pool.
+ //
+
+ X509Certificate2? certificate = null;
+
+ byte[]? certDataRented = null;
+ Memory<byte> certData = default;
+ byte[]? chainDataRented = null;
+ Memory<byte> chainData = default;
+
+ if (certificatePtr != IntPtr.Zero)
+ {
+ if (MsQuicApi.UsesSChannelBackend)
+ {
+ // provided data is a pointer to a CERT_CONTEXT
+ certificate = new X509Certificate2(certificatePtr);
+ // TODO: what about chainPtr?
+ }
+ else
+ {
+ unsafe
+ {
+ // On non-SChannel backends we specify USE_PORTABLE_CERTIFICATES and the contents are buffers
+                    // with the DER-encoded cert and chain.
+ QUIC_BUFFER* certificateBuffer = (QUIC_BUFFER*)certificatePtr;
+ QUIC_BUFFER* chainBuffer = (QUIC_BUFFER*)chainPtr;
+
+ if (certificateBuffer->Length > 0)
+ {
+ certDataRented = ArrayPool<byte>.Shared.Rent((int)certificateBuffer->Length);
+ certData = certDataRented.AsMemory(0, (int)certificateBuffer->Length);
+ certificateBuffer->Span.CopyTo(certData.Span);
+ }
+
+ if (chainBuffer->Length > 0)
+ {
+ chainDataRented = ArrayPool<byte>.Shared.Rent((int)chainBuffer->Length);
+ chainData = chainDataRented.AsMemory(0, (int)chainBuffer->Length);
+ chainBuffer->Span.CopyTo(chainData.Span);
+ }
+ }
+ }
+ }
+
+        // We want to do the certificate validation asynchronously, but due to a bug in MsQuic we need to call the callback synchronously on some versions.
+ if (MsQuicApi.SupportsAsyncCertValidation)
+ {
+            // Force a yield to the thread pool to free up the MsQuic worker thread.
+ await Task.CompletedTask.ConfigureAwait(ConfigureAwaitOptions.ForceYielding);
+ }
+
+ // certificatePtr and chainPtr are invalid beyond this point
+
+ QUIC_TLS_ALERT_CODES result;
+ try
+ {
+ if (certData.Length > 0)
+ {
+ Debug.Assert(certificate == null);
+ certificate = new X509Certificate2(certData.Span);
+ }
+
+ result = _connection._sslConnectionOptions.ValidateCertificate(certificate, certData.Span, chainData.Span);
+ _connection._remoteCertificate = certificate;
+ }
+ catch (Exception ex)
+ {
+ certificate?.Dispose();
+ _connection._connectedTcs.TrySetException(ex);
+ result = QUIC_TLS_ALERT_CODES.USER_CANCELED;
+ }
+ finally
+ {
+ if (certDataRented != null)
+ {
+ ArrayPool<byte>.Shared.Return(certDataRented);
+ }
+
+ if (chainDataRented != null)
+ {
+ ArrayPool<byte>.Shared.Return(chainDataRented);
+ }
+ }
+
+ if (MsQuicApi.SupportsAsyncCertValidation)
+ {
+ int status = MsQuicApi.Api.ConnectionCertificateValidationComplete(
+ _connection._handle,
+ result == QUIC_TLS_ALERT_CODES.SUCCESS ? (byte)1 : (byte)0,
+ result);
+
+ if (MsQuic.StatusFailed(status))
+ {
+ if (NetEventSource.Log.IsEnabled())
+ {
+ NetEventSource.Error(_connection, $"{_connection} ConnectionCertificateValidationComplete failed with {ThrowHelper.GetErrorMessageForStatus(status)}");
+ }
+ }
+ }
+
+ return result == QUIC_TLS_ALERT_CODES.SUCCESS;
+ }
+
+ private QUIC_TLS_ALERT_CODES ValidateCertificate(X509Certificate2? certificate, Span<byte> certData, Span<byte> chainData)
{
SslPolicyErrors sslPolicyErrors = SslPolicyErrors.None;
- IntPtr certificateBuffer = 0;
- int certificateLength = 0;
bool wrapException = false;
X509Chain? chain = null;
- X509Certificate2? result = null;
try
{
- if (certificatePtr is not null)
+ if (certificate is not null)
{
chain = new X509Chain();
if (_certificateChainPolicy != null)
@@ -96,43 +203,26 @@ public partial class QuicConnection
chain.ChainPolicy.ApplicationPolicy.Add(_isClient ? s_serverAuthOid : s_clientAuthOid);
}
- if (MsQuicApi.UsesSChannelBackend)
+ if (chainData.Length > 0)
{
- result = new X509Certificate2((IntPtr)certificatePtr);
+ X509Certificate2Collection additionalCertificates = new X509Certificate2Collection();
+ additionalCertificates.Import(chainData);
+ chain.ChainPolicy.ExtraStore.AddRange(additionalCertificates);
}
- else
- {
- if (certificatePtr->Length > 0)
- {
- certificateBuffer = (IntPtr)certificatePtr->Buffer;
- certificateLength = (int)certificatePtr->Length;
- result = new X509Certificate2(certificatePtr->Span);
- }
- if (chainPtr->Length > 0)
- {
- X509Certificate2Collection additionalCertificates = new X509Certificate2Collection();
- additionalCertificates.Import(chainPtr->Span);
- chain.ChainPolicy.ExtraStore.AddRange(additionalCertificates);
- }
- }
- }
-
- if (result is not null)
- {
bool checkCertName = !chain!.ChainPolicy!.VerificationFlags.HasFlag(X509VerificationFlags.IgnoreInvalidName);
- sslPolicyErrors |= CertificateValidation.BuildChainAndVerifyProperties(chain!, result, checkCertName, !_isClient, TargetHostNameHelper.NormalizeHostName(_targetHost), certificateBuffer, certificateLength);
+ sslPolicyErrors |= CertificateValidation.BuildChainAndVerifyProperties(chain!, certificate, checkCertName, !_isClient, TargetHostNameHelper.NormalizeHostName(_targetHost), certData);
}
else if (_certificateRequired)
{
sslPolicyErrors |= SslPolicyErrors.RemoteCertificateNotAvailable;
}
- int status = QUIC_STATUS_SUCCESS;
+ QUIC_TLS_ALERT_CODES result = QUIC_TLS_ALERT_CODES.SUCCESS;
if (_validationCallback is not null)
{
wrapException = true;
- if (!_validationCallback(_connection, result, chain, sslPolicyErrors))
+ if (!_validationCallback(_connection, certificate, chain, sslPolicyErrors))
{
wrapException = false;
if (_isClient)
@@ -140,7 +230,7 @@ public partial class QuicConnection
throw new AuthenticationException(SR.net_quic_cert_custom_validation);
}
- status = QUIC_STATUS_USER_CANCELED;
+ result = QUIC_TLS_ALERT_CODES.BAD_CERTIFICATE;
}
}
else if (sslPolicyErrors != SslPolicyErrors.None)
@@ -150,15 +240,13 @@ public partial class QuicConnection
throw new AuthenticationException(SR.Format(SR.net_quic_cert_chain_validation, sslPolicyErrors));
}
- status = QUIC_STATUS_HANDSHAKE_FAILURE;
+ result = QUIC_TLS_ALERT_CODES.BAD_CERTIFICATE;
}
- certificate = result;
- return status;
+ return result;
}
catch (Exception ex)
{
- result?.Dispose();
if (wrapException)
{
throw new QuicException(QuicError.CallbackError, null, SR.net_quic_callback_error, ex);
diff --git a/src/libraries/System.Net.Quic/src/System/Net/Quic/QuicConnection.cs b/src/libraries/System.Net.Quic/src/System/Net/Quic/QuicConnection.cs
index db3adf776d54..5a4f626e2f54 100644
--- a/src/libraries/System.Net.Quic/src/System/Net/Quic/QuicConnection.cs
+++ b/src/libraries/System.Net.Quic/src/System/Net/Quic/QuicConnection.cs
@@ -571,15 +571,20 @@ public sealed partial class QuicConnection : IAsyncDisposable
}
private unsafe int HandleEventPeerCertificateReceived(ref PEER_CERTIFICATE_RECEIVED_DATA data)
{
- try
+ //
+        // Certificate validation is an expensive operation and we don't want to delay the MsQuic
+        // worker thread, so we offload the validation to the .NET thread pool. Incidentally, this
+        // also prevents a user-provided RemoteCertificateValidationCallback from blocking MsQuic
+        // worker threads.
+ //
+
+ var task = _sslConnectionOptions.StartAsyncCertificateValidation((IntPtr)data.Certificate, (IntPtr)data.Chain);
+ if (task.IsCompletedSuccessfully)
{
- return _sslConnectionOptions.ValidateCertificate((QUIC_BUFFER*)data.Certificate, (QUIC_BUFFER*)data.Chain, out _remoteCertificate);
- }
- catch (Exception ex)
- {
- _connectedTcs.TrySetException(ex);
- return QUIC_STATUS_HANDSHAKE_FAILURE;
+ return task.Result ? QUIC_STATUS_SUCCESS : QUIC_STATUS_BAD_CERTIFICATE;
}
+
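+        // Validation is still running on the thread pool; StartAsyncCertificateValidation reports the result to MsQuic via ConnectionCertificateValidationComplete.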
+ return QUIC_STATUS_PENDING;
}
private unsafe int HandleConnectionEvent(ref QUIC_CONNECTION_EVENT connectionEvent)
diff --git a/src/libraries/System.Net.Quic/src/System/Net/Quic/QuicListener.cs b/src/libraries/System.Net.Quic/src/System/Net/Quic/QuicListener.cs
index 6f0a0d8bb5b7..88ea309054a7 100644
--- a/src/libraries/System.Net.Quic/src/System/Net/Quic/QuicListener.cs
+++ b/src/libraries/System.Net.Quic/src/System/Net/Quic/QuicListener.cs
@@ -209,6 +209,11 @@ public sealed partial class QuicListener : IAsyncDisposable
/// <param name="clientHello">The TLS ClientHello data.</param>
private async void StartConnectionHandshake(QuicConnection connection, SslClientHelloInfo clientHello)
{
+        // Yield to the thread pool immediately. This makes sure the connection options callback
+        // provided by the user is not invoked from the MsQuic thread and cannot delay ACKs
+        // or other operations on other connections.
+ await Task.CompletedTask.ConfigureAwait(ConfigureAwaitOptions.ForceYielding);
+
bool wrapException = false;
CancellationToken cancellationToken = default;
diff --git a/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicCipherSuitesPolicyTests.cs b/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicCipherSuitesPolicyTests.cs
index 2d95e08c7439..45d65a0b7cdc 100644
--- a/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicCipherSuitesPolicyTests.cs
+++ b/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicCipherSuitesPolicyTests.cs
@@ -11,6 +11,7 @@ namespace System.Net.Quic.Tests
[Collection(nameof(QuicTestCollection))]
[ConditionalClass(typeof(QuicTestBase), nameof(QuicTestBase.IsSupported), nameof(QuicTestBase.IsNotArm32CoreClrStressTest))]
[SkipOnPlatform(TestPlatforms.Windows, "CipherSuitesPolicy is not supported on Windows")]
+ [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsAlpine), nameof(PlatformDetection.IsArmProcess))]
public class MsQuicCipherSuitesPolicyTests : QuicTestBase
{
public MsQuicCipherSuitesPolicyTests(ITestOutputHelper output) : base(output) { }
diff --git a/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicPlatformDetectionTests.cs b/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicPlatformDetectionTests.cs
index 891e6c735021..7c2511bbb6d1 100644
--- a/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicPlatformDetectionTests.cs
+++ b/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicPlatformDetectionTests.cs
@@ -60,6 +60,7 @@ namespace System.Net.Quic.Tests
[ActiveIssue("https://github.com/dotnet/runtime/issues/82154", typeof(PlatformDetection), nameof(PlatformDetection.IsRaspbian10), nameof(PlatformDetection.IsArmv6Process), nameof(PlatformDetection.IsInContainer))]
[ActiveIssue("https://github.com/dotnet/runtime/issues/82154", typeof(PlatformDetection), nameof(PlatformDetection.IsPpc64leProcess))]
[ActiveIssue("https://github.com/dotnet/runtime/issues/82154", typeof(PlatformDetection), nameof(PlatformDetection.IsUbuntu2004), nameof(PlatformDetection.IsS390xProcess))]
+ [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsAlpine), nameof(PlatformDetection.IsArmProcess))]
[ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsInHelix))]
[PlatformSpecific(TestPlatforms.Linux)]
public void SupportedLinuxPlatforms_IsSupportedIsTrue()
diff --git a/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicRemoteExecutorTests.cs b/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicRemoteExecutorTests.cs
index 90cec1a5237a..882600acf6a2 100644
--- a/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicRemoteExecutorTests.cs
+++ b/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicRemoteExecutorTests.cs
@@ -14,6 +14,7 @@ namespace System.Net.Quic.Tests
{
[Collection(nameof(QuicTestCollection))]
[ConditionalClass(typeof(QuicTestBase), nameof(QuicTestBase.IsSupported), nameof(QuicTestBase.IsNotArm32CoreClrStressTest))]
+ [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsAlpine), nameof(PlatformDetection.IsArmProcess))]
public class MsQuicRemoteExecutorTests : QuicTestBase
{
public MsQuicRemoteExecutorTests()
diff --git a/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicTests.cs b/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicTests.cs
index 31177d6c15e0..b432cfc0aba2 100644
--- a/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicTests.cs
+++ b/src/libraries/System.Net.Quic/tests/FunctionalTests/MsQuicTests.cs
@@ -48,6 +48,7 @@ namespace System.Net.Quic.Tests
[Collection(nameof(QuicTestCollection))]
[ConditionalClass(typeof(QuicTestBase), nameof(QuicTestBase.IsSupported), nameof(QuicTestBase.IsNotArm32CoreClrStressTest))]
+ [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsAlpine), nameof(PlatformDetection.IsArmProcess))]
public class MsQuicTests : QuicTestBase, IClassFixture<CertificateSetup>
{
private static byte[] s_data = "Hello world!"u8.ToArray();
@@ -356,7 +357,10 @@ namespace System.Net.Quic.Tests
}
}
+ static bool SupportsAsyncCertValidation => QuicTestCollection.MsQuicVersion >= new Version(2, 4);
+
[Fact]
+ [ActiveIssue("https://github.com/dotnet/runtime/issues/99074", typeof(MsQuicTests), nameof(SupportsAsyncCertValidation))]
public async Task CertificateCallbackThrowPropagates()
{
using CancellationTokenSource cts = new CancellationTokenSource(PassingTestTimeout);
diff --git a/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicConnectionTests.cs b/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicConnectionTests.cs
index f7c70196e6d1..324a76f5e569 100644
--- a/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicConnectionTests.cs
+++ b/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicConnectionTests.cs
@@ -6,6 +6,7 @@ using System.Net.Sockets;
using System.Security.Cryptography.X509Certificates;
using System.Threading;
using System.Threading.Tasks;
+using Microsoft.DotNet.XUnitExtensions;
using Xunit;
using Xunit.Abstractions;
@@ -15,6 +16,7 @@ namespace System.Net.Quic.Tests
[Collection(nameof(QuicTestCollection))]
[ConditionalClass(typeof(QuicTestBase), nameof(QuicTestBase.IsSupported), nameof(QuicTestBase.IsNotArm32CoreClrStressTest))]
+ [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsAlpine), nameof(PlatformDetection.IsArmProcess))]
public sealed class QuicConnectionTests : QuicTestBase
{
const int ExpectedErrorCode = 1234;
@@ -22,7 +24,7 @@ namespace System.Net.Quic.Tests
public QuicConnectionTests(ITestOutputHelper output) : base(output) { }
- [Theory]
+ [ConditionalTheory]
[MemberData(nameof(LocalAddresses))]
public async Task TestConnect(IPAddress address)
{
diff --git a/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicListenerTests.cs b/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicListenerTests.cs
index 0de2863f902f..c9c394fcfb19 100644
--- a/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicListenerTests.cs
+++ b/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicListenerTests.cs
@@ -15,6 +15,7 @@ namespace System.Net.Quic.Tests
{
[Collection(nameof(QuicTestCollection))]
[ConditionalClass(typeof(QuicTestBase), nameof(QuicTestBase.IsSupported), nameof(QuicTestBase.IsNotArm32CoreClrStressTest))]
+ [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsAlpine), nameof(PlatformDetection.IsArmProcess))]
public sealed class QuicListenerTests : QuicTestBase
{
public QuicListenerTests(ITestOutputHelper output) : base(output) { }
diff --git a/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicStreamConnectedStreamConformanceTests.cs b/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicStreamConnectedStreamConformanceTests.cs
index 2f8b7a66ff6c..7b9257cdc266 100644
--- a/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicStreamConnectedStreamConformanceTests.cs
+++ b/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicStreamConnectedStreamConformanceTests.cs
@@ -16,6 +16,7 @@ namespace System.Net.Quic.Tests
{
[Collection(nameof(QuicTestCollection))]
[ConditionalClass(typeof(QuicTestBase), nameof(QuicTestBase.IsSupported), nameof(QuicTestBase.IsNotArm32CoreClrStressTest))]
+ [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsAlpine), nameof(PlatformDetection.IsArmProcess))]
public sealed class QuicStreamConformanceTests : ConnectedStreamConformanceTests
{
protected override bool UsableAfterCanceledReads => false;
diff --git a/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicStreamTests.cs b/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicStreamTests.cs
index e82bd1ea9e8c..e02851fac73e 100644
--- a/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicStreamTests.cs
+++ b/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicStreamTests.cs
@@ -14,6 +14,7 @@ namespace System.Net.Quic.Tests
{
[Collection(nameof(QuicTestCollection))]
[ConditionalClass(typeof(QuicTestBase), nameof(QuicTestBase.IsSupported), nameof(QuicTestBase.IsNotArm32CoreClrStressTest))]
+ [ActiveIssue("https://github.com/dotnet/runtime/issues/91757", typeof(PlatformDetection), nameof(PlatformDetection.IsAlpine), nameof(PlatformDetection.IsArmProcess))]
public sealed class QuicStreamTests : QuicTestBase
{
private static byte[] s_data = "Hello world!"u8.ToArray();
diff --git a/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicTestBase.cs b/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicTestBase.cs
index d85cf0e5ed3d..c3e0e4e7372a 100644
--- a/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicTestBase.cs
+++ b/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicTestBase.cs
@@ -18,6 +18,8 @@ using Microsoft.Quic;
namespace System.Net.Quic.Tests
{
+ using Configuration = System.Net.Test.Common.Configuration;
+
public abstract class QuicTestBase : IDisposable
{
public const long DefaultStreamErrorCodeClient = 123456;
@@ -31,8 +33,7 @@ namespace System.Net.Quic.Tests
public static bool IsSupported => QuicListener.IsSupported && QuicConnection.IsSupported;
public static bool IsNotArm32CoreClrStressTest => !(CoreClrConfigurationDetection.IsStressTest && PlatformDetection.IsArmProcess);
- private static readonly Lazy<bool> _isIPv6Available = new Lazy<bool>(GetIsIPv6Available);
- public static bool IsIPv6Available => _isIPv6Available.Value;
+ public static bool IsIPv6Available => Configuration.Sockets.IsIPv6LoopbackAvailable;
public static SslApplicationProtocol ApplicationProtocol { get; } = new SslApplicationProtocol("quictest");
@@ -375,19 +376,5 @@ namespace System.Net.Quic.Tests
ArrayPool<byte>.Shared.Return(buffer);
}
}
-
- internal static bool GetIsIPv6Available()
- {
- try
- {
- using Socket s = new Socket(AddressFamily.InterNetworkV6, SocketType.Dgram, ProtocolType.Udp);
- s.Bind(new IPEndPoint(IPAddress.IPv6Loopback, 0));
- return true;
- }
- catch (SocketException)
- {
- return false;
- }
- }
}
}
diff --git a/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicTestCollection.cs b/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicTestCollection.cs
index aac056df7cc1..f8dd160acb00 100644
--- a/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicTestCollection.cs
+++ b/src/libraries/System.Net.Quic/tests/FunctionalTests/QuicTestCollection.cs
@@ -13,11 +13,15 @@ using Xunit.Abstractions;
using Microsoft.Quic;
using static Microsoft.Quic.MsQuic;
+namespace System.Net.Quic.Tests;
+
[CollectionDefinition(nameof(QuicTestCollection), DisableParallelization = true)]
public unsafe class QuicTestCollection : ICollectionFixture<QuicTestCollection>, IDisposable
{
public static bool IsSupported => QuicListener.IsSupported && QuicConnection.IsSupported;
+ public static Version MsQuicVersion { get; } = GetMsQuicVersion();
+
public QuicTestCollection()
{
string msQuicLibraryVersion = GetMsQuicLibraryVersion();
@@ -80,6 +84,13 @@ public unsafe class QuicTestCollection : ICollectionFixture<QuicTestCollection>,
System.Console.WriteLine(sb.ToString());
}
+ private static Version GetMsQuicVersion()
+ {
+ Type msQuicApiType = Type.GetType("System.Net.Quic.MsQuicApi, System.Net.Quic");
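+        // MsQuicApi and its Version property are internal, so read them via reflection.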
+
+ return (Version)msQuicApiType.GetProperty("Version", BindingFlags.NonPublic | BindingFlags.Static).GetGetMethod(true).Invoke(null, Array.Empty<object?>());
+ }
+
private static string? GetMsQuicLibraryVersion()
{
Type msQuicApiType = Type.GetType("System.Net.Quic.MsQuicApi, System.Net.Quic");
diff --git a/src/libraries/System.Net.Requests/src/Resources/Strings.resx b/src/libraries/System.Net.Requests/src/Resources/Strings.resx
index b33f2a024403..4c0a7a45c146 100644
--- a/src/libraries/System.Net.Requests/src/Resources/Strings.resx
+++ b/src/libraries/System.Net.Requests/src/Resources/Strings.resx
@@ -264,4 +264,7 @@
<data name="net_proxyschemenotsupported" xml:space="preserve">
<value>The ServicePointManager does not support proxies with the {0} scheme.</value>
</data>
+ <data name="net_maximumbindretries" xml:space="preserve">
+ <value>Reached the maximum number of BindIPEndPointDelegate retries.</value>
+ </data>
</root>
diff --git a/src/libraries/System.Net.Requests/src/System.Net.Requests.csproj b/src/libraries/System.Net.Requests/src/System.Net.Requests.csproj
index 397622b4806a..b53b7272ea41 100644
--- a/src/libraries/System.Net.Requests/src/System.Net.Requests.csproj
+++ b/src/libraries/System.Net.Requests/src/System.Net.Requests.csproj
@@ -110,6 +110,7 @@
<Reference Include="System.Diagnostics.Tracing" />
<Reference Include="System.Memory" />
<Reference Include="System.Net.Http" />
+ <Reference Include="System.Net.NameResolution" />
<Reference Include="System.Net.Primitives" />
<Reference Include="System.Net.Security" />
<Reference Include="System.Net.Sockets" />
diff --git a/src/libraries/System.Net.Requests/src/System/Net/HttpWebRequest.cs b/src/libraries/System.Net.Requests/src/System/Net/HttpWebRequest.cs
index 2a54bbb4d8d5..44c601159136 100644
--- a/src/libraries/System.Net.Requests/src/System/Net/HttpWebRequest.cs
+++ b/src/libraries/System.Net.Requests/src/System/Net/HttpWebRequest.cs
@@ -6,6 +6,7 @@ using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Globalization;
using System.IO;
+using System.Net;
using System.Net.Cache;
using System.Net.Http;
using System.Net.Http.Headers;
@@ -41,6 +42,7 @@ namespace System.Net
private Task<HttpResponseMessage>? _sendRequestTask;
private static int _defaultMaxResponseHeadersLength = HttpHandlerDefaults.DefaultMaxResponseHeadersLength;
+ private static int _defaultMaximumErrorResponseLength = -1;
private int _beginGetRequestStreamCalled;
private int _beginGetResponseCalled;
@@ -420,11 +422,7 @@ namespace System.Net
/// <devdoc>
/// <para>Sets the media type header</para>
/// </devdoc>
- public string? MediaType
- {
- get;
- set;
- }
+ public string? MediaType { get; set; }
/// <devdoc>
/// <para>
@@ -677,14 +675,22 @@ namespace System.Net
}
set
{
+ ArgumentOutOfRangeException.ThrowIfLessThan(value, 0);
_defaultMaxResponseHeadersLength = value;
}
}
- // NOP
public static int DefaultMaximumErrorResponseLength
{
- get; set;
+ get
+ {
+ return _defaultMaximumErrorResponseLength;
+ }
+ set
+ {
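+                // -1 means no limit, i.e. error response bodies are not truncated.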
+ ArgumentOutOfRangeException.ThrowIfLessThan(value, -1);
+ _defaultMaximumErrorResponseLength = value;
+ }
}
private static RequestCachePolicy? _defaultCachePolicy = new RequestCachePolicy(RequestCacheLevel.BypassCache);
@@ -806,10 +812,12 @@ namespace System.Net
if (value.Equals(HttpVersion.Version11))
{
IsVersionHttp10 = false;
+ ServicePoint.ProtocolVersion = HttpVersion.Version11;
}
else if (value.Equals(HttpVersion.Version10))
{
IsVersionHttp10 = true;
+ ServicePoint.ProtocolVersion = HttpVersion.Version10;
}
else
{
@@ -1621,6 +1629,13 @@ namespace System.Net
handler.UseCookies = false;
}
+ if (parameters.ServicePoint is { } servicePoint)
+ {
+ handler.MaxConnectionsPerServer = servicePoint.ConnectionLimit;
+ handler.PooledConnectionIdleTimeout = TimeSpan.FromMilliseconds(servicePoint.MaxIdleTime);
+ handler.PooledConnectionLifetime = TimeSpan.FromMilliseconds(servicePoint.ConnectionLeaseTimeout);
+ }
+
Debug.Assert(handler.UseProxy); // Default of handler.UseProxy is true.
Debug.Assert(handler.Proxy == null); // Default of handler.Proxy is null.
@@ -1638,7 +1653,7 @@ namespace System.Net
{
handler.UseProxy = false;
}
- else if (!object.ReferenceEquals(parameters.Proxy, WebRequest.GetSystemWebProxy()))
+ else if (!ReferenceEquals(parameters.Proxy, GetSystemWebProxy()))
{
handler.Proxy = parameters.Proxy;
}
@@ -1659,10 +1674,20 @@ namespace System.Net
handler.SslOptions.EnabledSslProtocols = (SslProtocols)parameters.SslProtocols;
handler.SslOptions.CertificateRevocationCheckMode = parameters.CheckCertificateRevocationList ? X509RevocationMode.Online : X509RevocationMode.NoCheck;
RemoteCertificateValidationCallback? rcvc = parameters.ServerCertificateValidationCallback;
- if (rcvc != null)
+ handler.SslOptions.RemoteCertificateValidationCallback = (message, cert, chain, errors) =>
{
- handler.SslOptions.RemoteCertificateValidationCallback = (message, cert, chain, errors) => rcvc(request!, cert, chain, errors);
- }
+ if (parameters.ServicePoint is { } servicePoint)
+ {
+ servicePoint.Certificate = cert;
+ }
+
+ if (rcvc is not null)
+ {
+ return rcvc(request!, cert, chain, errors);
+ }
+
+ return errors == SslPolicyErrors.None;
+ };
// Set up a ConnectCallback so that we can control Socket-specific settings, like ReadWriteTimeout => socket.Send/ReceiveTimeout.
handler.ConnectCallback = async (context, cancellationToken) =>
@@ -1671,6 +1696,10 @@ namespace System.Net
try
{
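+                // Resolve the host up front so each candidate address can be passed to the BindIPEndPointDelegate below.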
+ IPAddress[] addresses = parameters.Async ?
+ await Dns.GetHostAddressesAsync(context.DnsEndPoint.Host, cancellationToken).ConfigureAwait(false) :
+ Dns.GetHostAddresses(context.DnsEndPoint.Host);
+
if (parameters.ServicePoint is { } servicePoint)
{
if (servicePoint.ReceiveBufferSize != -1)
@@ -1684,19 +1713,58 @@ namespace System.Net
socket.SetSocketOption(SocketOptionLevel.Tcp, SocketOptionName.TcpKeepAliveTime, keepAlive.Time);
socket.SetSocketOption(SocketOptionLevel.Tcp, SocketOptionName.TcpKeepAliveInterval, keepAlive.Interval);
}
+
+ BindHelper(servicePoint, ref addresses, socket, context.DnsEndPoint.Port);
+ static void BindHelper(ServicePoint servicePoint, ref IPAddress[] addresses, Socket socket, int port)
+ {
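+                    // Offer each resolved address to the user-provided delegate; a null endpoint means move on to the next address.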
+ if (servicePoint.BindIPEndPointDelegate is null)
+ {
+ return;
+ }
+
+ const int MaxRetries = 100;
+ foreach (IPAddress address in addresses)
+ {
+ int retryCount = 0;
+ for (; retryCount < MaxRetries; retryCount++)
+ {
+ IPEndPoint? endPoint = servicePoint.BindIPEndPointDelegate(servicePoint, new IPEndPoint(address, port), retryCount);
+ if (endPoint is null) // Get other address to try
+ {
+ break;
+ }
+
+ try
+ {
+ socket.Bind(endPoint);
+ addresses = [address];
+ return; // Bind successful, exit loops.
+ }
+ catch
+ {
+ continue;
+ }
+ }
+
+ if (retryCount >= MaxRetries)
+ {
+ throw new OverflowException(SR.net_maximumbindretries);
+ }
+ }
+ }
}
- socket.NoDelay = true;
+ socket.NoDelay = !(parameters.ServicePoint?.UseNagleAlgorithm) ?? true;
if (parameters.Async)
{
- await socket.ConnectAsync(context.DnsEndPoint, cancellationToken).ConfigureAwait(false);
+ await socket.ConnectAsync(addresses, context.DnsEndPoint.Port, cancellationToken).ConfigureAwait(false);
}
else
{
using (cancellationToken.UnsafeRegister(s => ((Socket)s!).Dispose(), socket))
{
- socket.Connect(context.DnsEndPoint);
+ socket.Connect(addresses, context.DnsEndPoint.Port);
}
// Throw in case cancellation caused the socket to be disposed after the Connect completed
diff --git a/src/libraries/System.Net.Requests/src/System/Net/HttpWebResponse.cs b/src/libraries/System.Net.Requests/src/System/Net/HttpWebResponse.cs
index f7fae7869b1e..7b0e9b90681f 100644
--- a/src/libraries/System.Net.Requests/src/System/Net/HttpWebResponse.cs
+++ b/src/libraries/System.Net.Requests/src/System/Net/HttpWebResponse.cs
@@ -8,6 +8,8 @@ using System.IO;
using System.Net.Http;
using System.Runtime.Serialization;
using System.Text;
+using System.Threading;
+using System.Threading.Tasks;
namespace System.Net
{
@@ -337,7 +339,14 @@ namespace System.Net
CheckDisposed();
if (_httpResponseMessage.Content != null)
{
- return _httpResponseMessage.Content.ReadAsStream();
+ Stream contentStream = _httpResponseMessage.Content.ReadAsStream();
+ int maxErrorResponseLength = HttpWebRequest.DefaultMaximumErrorResponseLength;
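+                // Truncate the stream only for error responses (status code 400 and above) and only when a limit is configured.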
+ if (maxErrorResponseLength < 0 || StatusCode < HttpStatusCode.BadRequest)
+ {
+ return contentStream;
+ }
+
+ return new TruncatedReadStream(contentStream, maxErrorResponseLength);
}
return Stream.Null;
@@ -371,5 +380,56 @@ namespace System.Net
}
private static string GetHeaderValueAsString(IEnumerable<string> values) => string.Join(", ", values);
+
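+        // Read-only wrapper stream that exposes at most maxSize bytes of the inner stream.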
+ internal sealed class TruncatedReadStream(Stream innerStream, int maxSize) : Stream
+ {
+ public override bool CanRead => true;
+ public override bool CanSeek => false;
+ public override bool CanWrite => false;
+
+ public override long Length => throw new NotSupportedException();
+ public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); }
+
+ public override void Flush() => throw new NotSupportedException();
+
+ public override int Read(byte[] buffer, int offset, int count)
+ {
+ return Read(new Span<byte>(buffer, offset, count));
+ }
+
+ public override int Read(Span<byte> buffer)
+ {
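+            // Clamp the read to the remaining budget; once maxSize reaches zero, reads return 0 (end of stream).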
+ int readBytes = innerStream.Read(buffer.Slice(0, Math.Min(buffer.Length, maxSize)));
+ maxSize -= readBytes;
+ return readBytes;
+ }
+
+ public override Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
+ {
+ return ReadAsync(new Memory<byte>(buffer, offset, count), cancellationToken).AsTask();
+ }
+
+ public override async ValueTask<int> ReadAsync(Memory<byte> buffer, CancellationToken cancellationToken = default)
+ {
+ int readBytes = await innerStream.ReadAsync(buffer.Slice(0, Math.Min(buffer.Length, maxSize)), cancellationToken)
+ .ConfigureAwait(false);
+ maxSize -= readBytes;
+ return readBytes;
+ }
+
+ public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
+ public override void SetLength(long value) => throw new NotSupportedException();
+ public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException();
+
+ public override ValueTask DisposeAsync() => innerStream.DisposeAsync();
+
+ protected override void Dispose(bool disposing)
+ {
+ if (disposing)
+ {
+ innerStream.Dispose();
+ }
+ }
+ }
}
}
diff --git a/src/libraries/System.Net.Requests/src/System/Net/ServicePoint/ServicePointManager.cs b/src/libraries/System.Net.Requests/src/System/Net/ServicePoint/ServicePointManager.cs
index a0cf9dcece15..bbf20b3e808b 100644
--- a/src/libraries/System.Net.Requests/src/System/Net/ServicePoint/ServicePointManager.cs
+++ b/src/libraries/System.Net.Requests/src/System/Net/ServicePoint/ServicePointManager.cs
@@ -78,7 +78,7 @@ namespace System.Net
}
}
- public static bool UseNagleAlgorithm { get; set; } = true;
+ public static bool UseNagleAlgorithm { get; set; }
public static bool Expect100Continue { get; set; } = true;
@@ -156,7 +156,8 @@ namespace System.Net
IdleSince = DateTime.Now,
Expect100Continue = Expect100Continue,
UseNagleAlgorithm = UseNagleAlgorithm,
- KeepAlive = KeepAlive
+ KeepAlive = KeepAlive,
+ MaxIdleTime = MaxServicePointIdleTime
};
s_servicePointTable[tableKey] = new WeakReference<ServicePoint>(sp);
diff --git a/src/libraries/System.Net.Requests/tests/HttpWebRequestTest.cs b/src/libraries/System.Net.Requests/tests/HttpWebRequestTest.cs
index 45563ccc3dd0..73c66872a7e5 100644
--- a/src/libraries/System.Net.Requests/tests/HttpWebRequestTest.cs
+++ b/src/libraries/System.Net.Requests/tests/HttpWebRequestTest.cs
@@ -258,7 +258,7 @@ namespace System.Net.Tests
Assert.Equal(64, HttpWebRequest.DefaultMaximumResponseHeadersLength);
Assert.NotNull(HttpWebRequest.DefaultCachePolicy);
Assert.Equal(RequestCacheLevel.BypassCache, HttpWebRequest.DefaultCachePolicy.Level);
- Assert.Equal(0, HttpWebRequest.DefaultMaximumErrorResponseLength);
+ Assert.Equal(-1, HttpWebRequest.DefaultMaximumErrorResponseLength);
Assert.NotNull(request.Proxy);
Assert.Equal(remoteServer, request.RequestUri);
Assert.True(request.SupportsCookieContainer);
@@ -2089,7 +2089,7 @@ namespace System.Net.Tests
request.ContinueTimeout = 30000;
Stream requestStream = await request.GetRequestStreamAsync();
requestStream.Write("aaaa\r\n\r\n"u8);
- await request.GetResponseAsync();
+ await GetResponseAsync(request);
},
async (server) =>
{
@@ -2118,7 +2118,7 @@ namespace System.Net.Tests
request.ContinueTimeout = continueTimeout;
Stream requestStream = await request.GetRequestStreamAsync();
requestStream.Write("aaaa\r\n\r\n"u8);
- await request.GetResponseAsync();
+ await GetResponseAsync(request);
},
async (server) =>
{
@@ -2144,7 +2144,7 @@ namespace System.Net.Tests
HttpWebRequest request = WebRequest.CreateHttp(uri);
request.Method = "POST";
request.ServicePoint.Expect100Continue = expect100Continue;
- await request.GetResponseAsync();
+ await GetResponseAsync(request);
},
async (server) =>
{
@@ -2167,6 +2167,122 @@ namespace System.Net.Tests
);
}
+ [ConditionalFact(typeof(RemoteExecutor), nameof(RemoteExecutor.IsSupported))]
+ public void SendHttpRequest_WhenDefaultMaximumErrorResponseLengthSet_Success()
+ {
+ RemoteExecutor.Invoke(async (async) =>
+ {
+ TaskCompletionSource tcs = new TaskCompletionSource();
+ await LoopbackServer.CreateClientAndServerAsync(
+ async (uri) =>
+ {
+ HttpWebRequest request = WebRequest.CreateHttp(uri);
+ HttpWebRequest.DefaultMaximumErrorResponseLength = 5;
+ var exception =
+ await Assert.ThrowsAsync<WebException>(() => bool.Parse(async) ? request.GetResponseAsync() : Task.Run(() => request.GetResponse()));
+ tcs.SetResult();
+ Assert.NotNull(exception.Response);
+ using (var responseStream = exception.Response.GetResponseStream())
+ {
+ var buffer = new byte[10];
+ int readLen = responseStream.Read(buffer, 0, buffer.Length);
+ Assert.Equal(5, readLen);
+ Assert.Equal(new string('a', 5), Encoding.UTF8.GetString(buffer[0..readLen]));
+ Assert.Equal(0, responseStream.Read(buffer));
+ }
+ },
+ async (server) =>
+ {
+ await server.AcceptConnectionAsync(
+ async connection =>
+ {
+ await connection.SendResponseAsync(statusCode: HttpStatusCode.BadRequest, content: new string('a', 10));
+ await tcs.Task;
+ });
+ });
+ }, IsAsync.ToString()).Dispose();
+ }
+
+ [Fact]
+ public void HttpWebRequest_SetProtocolVersion_Success()
+ {
+ HttpWebRequest request = WebRequest.CreateHttp(Configuration.Http.RemoteEchoServer);
+
+ request.ProtocolVersion = HttpVersion.Version10;
+ Assert.Equal(HttpVersion.Version10, request.ServicePoint.ProtocolVersion);
+
+ request.ProtocolVersion = HttpVersion.Version11;
+ Assert.Equal(HttpVersion.Version11, request.ServicePoint.ProtocolVersion);
+ }
+
+ [ConditionalFact(typeof(RemoteExecutor), nameof(RemoteExecutor.IsSupported))]
+ public void SendHttpRequest_BindIPEndPoint_Success()
+ {
+ RemoteExecutor.Invoke(async (async) =>
+ {
+ TaskCompletionSource tcs = new TaskCompletionSource();
+ await LoopbackServer.CreateClientAndServerAsync(
+ async (uri) =>
+ {
+ HttpWebRequest request = WebRequest.CreateHttp(uri);
+ request.ServicePoint.BindIPEndPointDelegate = (_, _, _) => new IPEndPoint(IPAddress.Loopback, 27277);
+ var responseTask = bool.Parse(async) ? request.GetResponseAsync() : Task.Run(() => request.GetResponse());
+ using (var response = (HttpWebResponse)await responseTask)
+ {
+ Assert.Equal(HttpStatusCode.OK, response.StatusCode);
+ }
+ tcs.SetResult();
+ },
+ async (server) =>
+ {
+ await server.AcceptConnectionAsync(
+ async connection =>
+ {
+ var ipEp = (IPEndPoint)connection.Socket.RemoteEndPoint;
+ Assert.Equal(27277, ipEp.Port);
+ await connection.SendResponseAsync();
+ await tcs.Task;
+ });
+ });
+ }, IsAsync.ToString()).Dispose();
+ }
+
+ [ConditionalFact(typeof(RemoteExecutor), nameof(RemoteExecutor.IsSupported))]
+ public void SendHttpRequest_BindIPEndPoint_Throws()
+ {
+ RemoteExecutor.Invoke(async (async) =>
+ {
+ Socket socket = new Socket(SocketType.Stream, ProtocolType.Tcp);
+ socket.Bind(new IPEndPoint(IPAddress.Loopback, 0));
+ ValueTask<Socket>? clientSocket = null;
+ CancellationTokenSource cts = new CancellationTokenSource();
+ if (PlatformDetection.IsLinux)
+ {
+ socket.Listen();
+ clientSocket = socket.AcceptAsync(cts.Token);
+ }
+
+ try
+ {
+                // The URI shouldn't matter because the request should throw an exception before the connection is opened.
+ HttpWebRequest request = WebRequest.CreateHttp(Configuration.Http.RemoteEchoServer);
+ request.ServicePoint.BindIPEndPointDelegate = (_, _, _) => (IPEndPoint)socket.LocalEndPoint!;
+ var exception = await Assert.ThrowsAsync<WebException>(() =>
+ bool.Parse(async) ? request.GetResponseAsync() : Task.Run(() => request.GetResponse()));
+ Assert.IsType<OverflowException>(exception.InnerException?.InnerException);
+ }
+ finally
+ {
+ if (clientSocket is not null)
+ {
+ await cts.CancelAsync();
+ }
+ socket.Dispose();
+ cts.Dispose();
+ }
+ }, IsAsync.ToString()).Dispose();
+ }
+
private void RequestStreamCallback(IAsyncResult asynchronousResult)
{
RequestState state = (RequestState)asynchronousResult.AsyncState;
diff --git a/src/libraries/System.Net.Requests/tests/ServicePointTests/ServicePointManagerTest.cs b/src/libraries/System.Net.Requests/tests/ServicePointTests/ServicePointManagerTest.cs
index c1230598a8d4..ec64068ed456 100644
--- a/src/libraries/System.Net.Requests/tests/ServicePointTests/ServicePointManagerTest.cs
+++ b/src/libraries/System.Net.Requests/tests/ServicePointTests/ServicePointManagerTest.cs
@@ -181,7 +181,7 @@ namespace System.Net.Tests
[Fact]
public static void UseNagleAlgorithm_Roundtrips()
{
- Assert.True(ServicePointManager.UseNagleAlgorithm);
+ Assert.False(ServicePointManager.UseNagleAlgorithm);
try
{
ServicePointManager.UseNagleAlgorithm = false;
@@ -325,7 +325,7 @@ namespace System.Net.Tests
Assert.Equal(new Version(1, 1), sp.ProtocolVersion);
Assert.Equal(-1, sp.ReceiveBufferSize);
Assert.True(sp.SupportsPipelining, "SupportsPipelining");
- Assert.True(sp.UseNagleAlgorithm, "UseNagleAlgorithm");
+ Assert.False(sp.UseNagleAlgorithm, "UseNagleAlgorithm");
}).Dispose();
}
diff --git a/src/libraries/System.Net.Sockets/src/System/Net/Sockets/Socket.Tasks.cs b/src/libraries/System.Net.Sockets/src/System/Net/Sockets/Socket.Tasks.cs
index e1891bef916f..3ba24e90cf10 100644
--- a/src/libraries/System.Net.Sockets/src/System/Net/Sockets/Socket.Tasks.cs
+++ b/src/libraries/System.Net.Sockets/src/System/Net/Sockets/Socket.Tasks.cs
@@ -677,7 +677,6 @@ namespace System.Net.Sockets
Debug.Assert(saea.BufferList == null);
saea.SetBuffer(MemoryMarshal.AsMemory(buffer));
saea.SocketFlags = socketFlags;
- saea._socketAddress = null;
saea.RemoteEndPoint = remoteEP;
saea.WrapExceptionsForNetworkStream = false;
return saea.SendToAsync(this, cancellationToken);
@@ -709,8 +708,17 @@ namespace System.Net.Sockets
saea.SetBuffer(MemoryMarshal.AsMemory(buffer));
saea.SocketFlags = socketFlags;
saea._socketAddress = socketAddress;
+ saea.RemoteEndPoint = null;
saea.WrapExceptionsForNetworkStream = false;
- return saea.SendToAsync(this, cancellationToken);
+ try
+ {
+ return saea.SendToAsync(this, cancellationToken);
+ }
+ finally
+ {
+            // Detach the user-provided SocketAddress so we do not accidentally stomp on it later.
+ saea._socketAddress = null;
+ }
}
/// <summary>
diff --git a/src/libraries/System.Net.Sockets/src/System/Net/Sockets/Socket.cs b/src/libraries/System.Net.Sockets/src/System/Net/Sockets/Socket.cs
index 11b8674d681f..a8c95005154c 100644
--- a/src/libraries/System.Net.Sockets/src/System/Net/Sockets/Socket.cs
+++ b/src/libraries/System.Net.Sockets/src/System/Net/Sockets/Socket.cs
@@ -3095,14 +3095,22 @@ namespace System.Net.Sockets
ArgumentNullException.ThrowIfNull(e);
EndPoint? endPointSnapshot = e.RemoteEndPoint;
- if (e._socketAddress == null)
+
+ // RemoteEndPoint should be set unless the caller used SendTo with their own SocketAddress.
+ // In that case RemoteEndPoint will be null and we take the provided SocketAddress as given.
+ if (endPointSnapshot == null && e._socketAddress == null)
{
- if (endPointSnapshot == null)
- {
- throw new ArgumentException(SR.Format(SR.InvalidNullArgument, "e.RemoteEndPoint"), nameof(e));
- }
+ throw new ArgumentException(SR.Format(SR.InvalidNullArgument, "e.RemoteEndPoint"), nameof(e));
+ }
- // Prepare SocketAddress
+ if (e._socketAddress != null && endPointSnapshot is IPEndPoint ipep && e._socketAddress.Family == endPointSnapshot?.AddressFamily)
+ {
+ // We have a matching SocketAddress. Since it is only used internally, it is OK to overwrite it in place without allocating a new one.
+ ipep.Serialize(e._socketAddress.Buffer.Span);
+ }
+ else if (endPointSnapshot != null)
+ {
+ // Prepare new SocketAddress
e._socketAddress = Serialize(ref endPointSnapshot);
}
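The in-place branch above calls an internal Serialize(Span<byte>) helper; the public .NET 8 surface exposes the same raw bytes through SocketAddress.Buffer. A hedged sketch of why the family check makes the overwrite safe: endpoints of the same address family serialize to the same size, so the cached buffer can be rewritten without reallocation.

    using System;
    using System.Net;
    using System.Net.Sockets;

    class InPlaceAddressReuseSketch
    {
        static void Main()
        {
            var first = new IPEndPoint(IPAddress.Loopback, 1111);
            var second = new IPEndPoint(IPAddress.Loopback, 2222);

            // Serialize once and keep the allocation.
            SocketAddress cached = first.Serialize();

            // Same AddressFamily means the same wire size, so the bytes of a new
            // endpoint can be copied over the cached buffer in place.
            second.Serialize().Buffer.Span.CopyTo(cached.Buffer.Span);

            Console.WriteLine(cached); // cached now carries the bytes of 127.0.0.1:2222
        }
    }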
diff --git a/src/libraries/System.Net.Sockets/src/System/Net/Sockets/SocketAsyncEventArgs.cs b/src/libraries/System.Net.Sockets/src/System/Net/Sockets/SocketAsyncEventArgs.cs
index e94d862571a0..78dd22e5eda7 100644
--- a/src/libraries/System.Net.Sockets/src/System/Net/Sockets/SocketAsyncEventArgs.cs
+++ b/src/libraries/System.Net.Sockets/src/System/Net/Sockets/SocketAsyncEventArgs.cs
@@ -923,7 +923,12 @@ namespace System.Net.Sockets
case SocketAsyncOperation.ReceiveFrom:
// Deal with incoming address.
UpdateReceivedSocketAddress(_socketAddress!);
- if (_remoteEndPoint != null && !SocketAddressExtensions.Equals(_socketAddress!, _remoteEndPoint))
+ if (_remoteEndPoint == null)
+ {
+ // Detach the user-provided SocketAddress, as it was updated in place.
+ _socketAddress = null;
+ }
+ else if (!SocketAddressExtensions.Equals(_socketAddress!, _remoteEndPoint))
{
try
{
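With this fix, a caller-owned SocketAddress handed to the span-based ReceiveFromAsync overload is filled in place and then dropped from the cached args instead of being reused for the next operation. A short sketch, assuming the .NET 8 overload that takes a SocketAddress directly:

    using System;
    using System.Net;
    using System.Net.Sockets;
    using System.Threading.Tasks;

    class CallerOwnedAddressSketch
    {
        static async Task Main()
        {
            using var receiver = new Socket(AddressFamily.InterNetwork, SocketType.Dgram, ProtocolType.Udp);
            receiver.Bind(new IPEndPoint(IPAddress.Loopback, 0));

            using var sender = new Socket(AddressFamily.InterNetwork, SocketType.Dgram, ProtocolType.Udp);
            await sender.SendToAsync(new byte[] { 42 }, SocketFlags.None, receiver.LocalEndPoint!);

            // Caller-owned SocketAddress: the receive path updates it in place and,
            // per the change above, must not keep referencing it afterwards.
            var senderAddress = new SocketAddress(AddressFamily.InterNetwork);
            byte[] buffer = new byte[16];
            int received = await receiver.ReceiveFromAsync(buffer, SocketFlags.None, senderAddress);

            Console.WriteLine($"{received} byte(s) from {senderAddress}");
        }
    }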
diff --git a/src/libraries/System.Net.Sockets/src/System/Net/Sockets/SocketPal.Unix.cs b/src/libraries/System.Net.Sockets/src/System/Net/Sockets/SocketPal.Unix.cs
index d2ac959e0b4c..837743dfa344 100644
--- a/src/libraries/System.Net.Sockets/src/System/Net/Sockets/SocketPal.Unix.cs
+++ b/src/libraries/System.Net.Sockets/src/System/Net/Sockets/SocketPal.Unix.cs
@@ -528,7 +528,6 @@ namespace System.Net.Sockets
out SocketFlags receivedFlags, out IPPacketInformation ipPacketInformation, out Interop.Error errno)
{
Debug.Assert(socket.IsSocket);
- Debug.Assert(socketAddress != null, "Expected non-null socketAddress");
int buffersCount = buffers.Count;
bool allocOnStack = buffersCount <= IovStackThreshold;
@@ -810,7 +809,6 @@ namespace System.Net.Sockets
{
Debug.Assert(flags == SocketFlags.None);
Debug.Assert(buffers == null);
- Debug.Assert(socketAddress == null);
receivedFlags = default;
received = SysRead(socket, buffer, out errno);
@@ -956,7 +954,7 @@ namespace System.Net.Sockets
{
sent = buffers != null ?
SysSend(socket, flags, buffers, ref bufferIndex, ref offset, socketAddress, out errno) :
- socketAddress == null ? SysSend(socket, flags, buffer, ref offset, ref count, out errno) :
+ socketAddress.IsEmpty ? SysSend(socket, flags, buffer, ref offset, ref count, out errno) :
SysSend(socket, flags, buffer, ref offset, ref count, socketAddress, out errno);
}
}
diff --git a/src/libraries/System.Net.Sockets/tests/FunctionalTests/SendTo.cs b/src/libraries/System.Net.Sockets/tests/FunctionalTests/SendTo.cs
index bf0ad1465886..7a3c33b64bf7 100644
--- a/src/libraries/System.Net.Sockets/tests/FunctionalTests/SendTo.cs
+++ b/src/libraries/System.Net.Sockets/tests/FunctionalTests/SendTo.cs
@@ -173,6 +173,34 @@ namespace System.Net.Sockets.Tests
public sealed class SendTo_Task : SendTo<SocketHelperTask>
{
public SendTo_Task(ITestOutputHelper output) : base(output) { }
+
+ [Theory]
+ [InlineData(false)]
+ [InlineData(true)]
+ public async Task SendTo_DifferentEP_Success(bool ipv4)
+ {
+ IPAddress address = ipv4 ? IPAddress.Loopback : IPAddress.IPv6Loopback;
+ IPEndPoint remoteEp = new IPEndPoint(address, 0);
+
+ using Socket receiver1 = new Socket(address.AddressFamily, SocketType.Dgram, ProtocolType.Udp);
+ using Socket receiver2 = new Socket(address.AddressFamily, SocketType.Dgram, ProtocolType.Udp);
+ using Socket sender = new Socket(address.AddressFamily, SocketType.Dgram, ProtocolType.Udp);
+
+ receiver1.BindToAnonymousPort(address);
+ receiver2.BindToAnonymousPort(address);
+
+ byte[] sendBuffer = new byte[32];
+ var receiveInternalBuffer = new byte[sendBuffer.Length];
+ ArraySegment<byte> receiveBuffer = new ArraySegment<byte>(receiveInternalBuffer, 0, receiveInternalBuffer.Length);
+
+ await sender.SendToAsync(sendBuffer, SocketFlags.None, receiver1.LocalEndPoint);
+ SocketReceiveFromResult result = await ReceiveFromAsync(receiver1, receiveBuffer, remoteEp).WaitAsync(TestSettings.PassingTestTimeout);
+ Assert.Equal(sendBuffer.Length, result.ReceivedBytes);
+
+ await sender.SendToAsync(sendBuffer, SocketFlags.None, receiver2.LocalEndPoint);
+ result = await ReceiveFromAsync(receiver2, receiveBuffer, remoteEp).WaitAsync(TestSettings.PassingTestTimeout);
+ Assert.Equal(sendBuffer.Length, result.ReceivedBytes);
+ }
}
public sealed class SendTo_CancellableTask : SendTo<SocketHelperCancellableTask>
diff --git a/src/libraries/System.Net.Sockets/tests/FunctionalTests/SocketAsyncEventArgsTest.cs b/src/libraries/System.Net.Sockets/tests/FunctionalTests/SocketAsyncEventArgsTest.cs
index ded34276f322..3d865cb86457 100644
--- a/src/libraries/System.Net.Sockets/tests/FunctionalTests/SocketAsyncEventArgsTest.cs
+++ b/src/libraries/System.Net.Sockets/tests/FunctionalTests/SocketAsyncEventArgsTest.cs
@@ -895,5 +895,51 @@ namespace System.Net.Sockets.Tests
return cwt.Count() == 0; // validate that the cwt becomes empty
}, 30_000));
}
+
+ [Theory]
+ [InlineData(false)]
+ [InlineData(true)]
+ public async Task SendTo_DifferentEP_Success(bool ipv4)
+ {
+ IPAddress address = ipv4 ? IPAddress.Loopback : IPAddress.IPv6Loopback;
+ IPEndPoint remoteEp = new IPEndPoint(address, 0);
+
+ using Socket receiver1 = new Socket(address.AddressFamily, SocketType.Dgram, ProtocolType.Udp);
+ using Socket receiver2 = new Socket(address.AddressFamily, SocketType.Dgram, ProtocolType.Udp);
+ using Socket sender = new Socket(address.AddressFamily, SocketType.Dgram, ProtocolType.Udp);
+
+ receiver1.BindToAnonymousPort(address);
+ receiver2.BindToAnonymousPort(address);
+
+ byte[] sendBuffer = new byte[32];
+ var receiveInternalBuffer = new byte[sendBuffer.Length];
+ ArraySegment<byte> receiveBuffer = new ArraySegment<byte>(receiveInternalBuffer, 0, receiveInternalBuffer.Length);
+
+ using SocketAsyncEventArgs saea = new SocketAsyncEventArgs();
+ ManualResetEventSlim mres = new ManualResetEventSlim(false);
+
+ saea.SetBuffer(sendBuffer);
+ saea.RemoteEndPoint = receiver1.LocalEndPoint;
+ saea.Completed += delegate { mres.Set(); };
+ if (sender.SendToAsync(saea))
+ {
+ // Did not finish synchronously; wait for the Completed callback.
+ mres.Wait();
+ }
+
+ SocketReceiveFromResult result = await receiver1.ReceiveFromAsync(receiveBuffer, remoteEp).WaitAsync(TestSettings.PassingTestTimeout);
+ Assert.Equal(sendBuffer.Length, result.ReceivedBytes);
+ mres.Reset();
+
+ saea.RemoteEndPoint = receiver2.LocalEndPoint;
+ if (sender.SendToAsync(saea))
+ {
+ // Did not finish synchronously; wait for the Completed callback.
+ mres.Wait();
+ }
+
+ result = await receiver2.ReceiveFromAsync(receiveBuffer, remoteEp).WaitAsync(TestSettings.PassingTestTimeout);
+ Assert.Equal(sendBuffer.Length, result.ReceivedBytes);
+ }
}
}
diff --git a/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/netcore/TensorPrimitives.LeadingZeroCount.cs b/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/netcore/TensorPrimitives.LeadingZeroCount.cs
index 7c90a9666bba..ab51042925f0 100644
--- a/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/netcore/TensorPrimitives.LeadingZeroCount.cs
+++ b/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/netcore/TensorPrimitives.LeadingZeroCount.cs
@@ -1,7 +1,13 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
+using System.Diagnostics;
+using System.Runtime.CompilerServices;
using System.Runtime.Intrinsics;
+using System.Runtime.Intrinsics.Arm;
+using System.Runtime.Intrinsics.X86;
+
+#pragma warning disable CS8500 // This takes the address of, gets the size of, or declares a pointer to a managed type
namespace System.Numerics.Tensors
{
@@ -22,13 +28,56 @@ namespace System.Numerics.Tensors
InvokeSpanIntoSpan<T, LeadingZeroCountOperator<T>>(x, destination);
/// <summary>T.LeadingZeroCount(x)</summary>
- internal readonly struct LeadingZeroCountOperator<T> : IUnaryOperator<T, T> where T : IBinaryInteger<T>
+ internal readonly unsafe struct LeadingZeroCountOperator<T> : IUnaryOperator<T, T> where T : IBinaryInteger<T>
{
- public static bool Vectorizable => false; // TODO: Vectorize
+ public static bool Vectorizable =>
+ (Avx512CD.VL.IsSupported && (sizeof(T) == 4 || sizeof(T) == 8)) ||
+ (AdvSimd.IsSupported && (sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4));
+
public static T Invoke(T x) => T.LeadingZeroCount(x);
- public static Vector128<T> Invoke(Vector128<T> x) => throw new NotSupportedException();
- public static Vector256<T> Invoke(Vector256<T> x) => throw new NotSupportedException();
- public static Vector512<T> Invoke(Vector512<T> x) => throw new NotSupportedException();
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public static Vector128<T> Invoke(Vector128<T> x)
+ {
+ if (Avx512CD.VL.IsSupported)
+ {
+ if (sizeof(T) == 4) return Avx512CD.VL.LeadingZeroCount(x.AsUInt32()).As<uint, T>();
+ if (sizeof(T) == 8) return Avx512CD.VL.LeadingZeroCount(x.AsUInt64()).As<ulong, T>();
+ }
+
+ Debug.Assert(AdvSimd.IsSupported);
+ {
+ if (sizeof(T) == 1) return AdvSimd.LeadingZeroCount(x.AsByte()).As<byte, T>();
+ if (sizeof(T) == 2) return AdvSimd.LeadingZeroCount(x.AsUInt16()).As<ushort, T>();
+
+ Debug.Assert(sizeof(T) == 4);
+ return AdvSimd.LeadingZeroCount(x.AsUInt32()).As<uint, T>();
+ }
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public static Vector256<T> Invoke(Vector256<T> x)
+ {
+ if (Avx512CD.VL.IsSupported)
+ {
+ if (sizeof(T) == 4) return Avx512CD.VL.LeadingZeroCount(x.AsUInt32()).As<uint, T>();
+ if (sizeof(T) == 8) return Avx512CD.VL.LeadingZeroCount(x.AsUInt64()).As<ulong, T>();
+ }
+
+ return Vector256.Create(Invoke(x.GetLower()), Invoke(x.GetUpper()));
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public static Vector512<T> Invoke(Vector512<T> x)
+ {
+ if (Avx512CD.IsSupported)
+ {
+ if (sizeof(T) == 4) return Avx512CD.LeadingZeroCount(x.AsUInt32()).As<uint, T>();
+ if (sizeof(T) == 8) return Avx512CD.LeadingZeroCount(x.AsUInt64()).As<ulong, T>();
+ }
+
+ return Vector512.Create(Invoke(x.GetLower()), Invoke(x.GetUpper()));
+ }
}
}
}
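For reference, a small usage sketch of the public API this operator backs; with the change above, the loop dispatches to VPLZCNT on AVX-512 CD hardware or CLZ on AdvSimd instead of the scalar fallback (assuming a System.Numerics.Tensors build that includes the generic LeadingZeroCount):

    using System;
    using System.Numerics.Tensors;

    class LeadingZeroCountSketch
    {
        static void Main()
        {
            uint[] values = { 0x0000_0001u, 0x0000_FF00u, 0x8000_0000u };
            uint[] counts = new uint[values.Length];

            // Element-wise T.LeadingZeroCount over the whole span.
            TensorPrimitives.LeadingZeroCount<uint>(values, counts);

            Console.WriteLine(string.Join(", ", counts)); // 31, 16, 0
        }
    }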
diff --git a/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/netcore/TensorPrimitives.TrailingZeroCount.cs b/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/netcore/TensorPrimitives.TrailingZeroCount.cs
index f5a54534ef95..610c6b1b02d3 100644
--- a/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/netcore/TensorPrimitives.TrailingZeroCount.cs
+++ b/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/netcore/TensorPrimitives.TrailingZeroCount.cs
@@ -1,7 +1,12 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
+using System.Diagnostics;
+using System.Runtime.CompilerServices;
using System.Runtime.Intrinsics;
+using System.Runtime.Intrinsics.Arm;
+
+#pragma warning disable CS8500 // This takes the address of, gets the size of, or declares a pointer to a managed type
namespace System.Numerics.Tensors
{
@@ -22,13 +27,47 @@ namespace System.Numerics.Tensors
InvokeSpanIntoSpan<T, TrailingZeroCountOperator<T>>(x, destination);
/// <summary>T.TrailingZeroCount(x)</summary>
- private readonly struct TrailingZeroCountOperator<T> : IUnaryOperator<T, T> where T : IBinaryInteger<T>
+ private readonly unsafe struct TrailingZeroCountOperator<T> : IUnaryOperator<T, T> where T : IBinaryInteger<T>
{
- public static bool Vectorizable => false; // TODO: Vectorize
+ public static bool Vectorizable =>
+ (AdvSimd.IsSupported && AdvSimd.Arm64.IsSupported && sizeof(T) == 1) ||
+ PopCountOperator<T>.Vectorizable; // http://0x80.pl/notesen/2023-01-31-avx512-bsf.html#trailing-zeros-simplified
+
public static T Invoke(T x) => T.TrailingZeroCount(x);
- public static Vector128<T> Invoke(Vector128<T> x) => throw new NotSupportedException();
- public static Vector256<T> Invoke(Vector256<T> x) => throw new NotSupportedException();
- public static Vector512<T> Invoke(Vector512<T> x) => throw new NotSupportedException();
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public static Vector128<T> Invoke(Vector128<T> x)
+ {
+ if (AdvSimd.IsSupported && sizeof(T) == 1)
+ {
+ return AdvSimd.LeadingZeroCount(AdvSimd.Arm64.ReverseElementBits(x.AsByte())).As<byte, T>();
+ }
+
+ Debug.Assert(PopCountOperator<T>.Vectorizable);
+ return PopCountOperator<T>.Invoke(~x & (x - Vector128<T>.One));
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public static Vector256<T> Invoke(Vector256<T> x)
+ {
+ if (PopCountOperator<T>.Vectorizable)
+ {
+ return PopCountOperator<T>.Invoke(~x & (x - Vector256<T>.One));
+ }
+
+ return Vector256.Create(Invoke(x.GetLower()), Invoke(x.GetUpper()));
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public static Vector512<T> Invoke(Vector512<T> x)
+ {
+ if (PopCountOperator<T>.Vectorizable)
+ {
+ return PopCountOperator<T>.Invoke(~x & (x - Vector512<T>.One));
+ }
+
+ return Vector512.Create(Invoke(x.GetLower()), Invoke(x.GetUpper()));
+ }
}
}
}
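The non-AdvSimd path leans on the identity tzcnt(x) = popcount(~x & (x - 1)) from the linked note: ~x & (x - 1) isolates exactly the bits below the lowest set bit. A scalar sanity check of that identity:

    using System;
    using System.Numerics;

    class TrailingZeroCountIdentity
    {
        static void Main()
        {
            foreach (uint x in new uint[] { 1, 8, 0x00F0_0000u, 0xFFFF_FFFFu })
            {
                // ~x & (x - 1) sets only the bits below the lowest set bit of x,
                // so its population count equals the trailing zero count.
                int viaPopCount = BitOperations.PopCount(~x & (x - 1));
                int direct = BitOperations.TrailingZeroCount(x);
                Console.WriteLine($"{x:X8}: tzcnt={direct} popcount-trick={viaPopCount}");
            }
        }
    }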
diff --git a/src/libraries/System.Numerics.Tensors/tests/Helpers.cs b/src/libraries/System.Numerics.Tensors/tests/Helpers.cs
index d6b5eef63d9d..729cacda3516 100644
--- a/src/libraries/System.Numerics.Tensors/tests/Helpers.cs
+++ b/src/libraries/System.Numerics.Tensors/tests/Helpers.cs
@@ -11,5 +11,60 @@ namespace System.Numerics.Tensors.Tests
public static IEnumerable<int> TensorLengthsIncluding0 => Enumerable.Range(0, 257);
public static IEnumerable<int> TensorLengths => Enumerable.Range(1, 256);
+
+ // Tolerances taken from testing in the scalar math routines:
+ // cf. https://github.com/dotnet/runtime/blob/89f7ad3b276fb0b48f20cb4e8408bdce85c2b415/src/libraries/System.Runtime/tests/System.Runtime.Extensions.Tests/System/Math.cs
+ // and https://github.com/dotnet/runtime/blob/fd48b6f5d1ff81a81d09e9d72982cc9e8d139852/src/libraries/System.Runtime/tests/System.Runtime.Tests/System/HalfTests.cs
+ public const double DefaultDoubleTolerance = 8.8817841970012523e-16;
+ public const float DefaultFloatTolerance = 4.76837158e-07f;
+ public const float DefaultHalfTolerance = 3.90625e-03f;
+ public const double DefaultToleranceForEstimates = 1.171875e-02;
+
+#if NETCOREAPP
+ private static class DefaultTolerance<T> where T : unmanaged, INumber<T>
+ {
+ public static readonly T Value = DetermineTolerance<T>(DefaultDoubleTolerance, DefaultFloatTolerance, Half.CreateTruncating(DefaultHalfTolerance)) ?? T.CreateTruncating(0);
+ }
+
+ public static bool IsEqualWithTolerance<T>(T expected, T actual, T? tolerance = null) where T : unmanaged, INumber<T>
+ {
+ tolerance = tolerance ?? DefaultTolerance<T>.Value;
+ T diff = T.Abs(expected - actual);
+ return !(diff > tolerance && diff > T.Max(T.Abs(expected), T.Abs(actual)) * tolerance);
+ }
+#else
+ public static bool IsEqualWithTolerance(float expected, float actual, float? tolerance = null)
+ {
+ tolerance ??= DefaultFloatTolerance;
+ float diff = MathF.Abs(expected - actual);
+ return !(diff > tolerance && diff > MathF.Max(MathF.Abs(expected), MathF.Abs(actual)) * tolerance);
+ }
+#endif
+
+ public static T? DetermineTolerance<T>(
+ double? doubleTolerance = null,
+ float? floatTolerance = null
+#if NETCOREAPP
+ , Half? halfTolerance = null
+#endif
+ ) where T : struct
+ {
+ if (typeof(T) == typeof(double) && doubleTolerance != null)
+ {
+ return (T?)(object)doubleTolerance;
+ }
+ else if (typeof(T) == typeof(float) && floatTolerance != null)
+ {
+ return (T?)(object)floatTolerance;
+ }
+#if NETCOREAPP
+ else if (typeof(T) == typeof(Half) && halfTolerance != null)
+ {
+ return (T?)(object)halfTolerance;
+ }
+#endif
+
+ return null;
+ }
}
}
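IsEqualWithTolerance accepts a value when the difference passes either the absolute test (useful near zero) or the relative test scaled by the larger magnitude (useful for large values). A float-only restatement of the same predicate with the default tolerance above, as a standalone sanity check:

    using System;

    class ToleranceSketch
    {
        static bool IsEqualWithTolerance(float expected, float actual, float tolerance = 4.76837158e-07f)
        {
            float diff = MathF.Abs(expected - actual);
            return !(diff > tolerance && diff > MathF.Max(MathF.Abs(expected), MathF.Abs(actual)) * tolerance);
        }

        static void Main()
        {
            Console.WriteLine(IsEqualWithTolerance(1e6f, 1e6f + 0.25f)); // True: relative test passes
            Console.WriteLine(IsEqualWithTolerance(1e-9f, 2e-9f));       // True: absolute test passes
            Console.WriteLine(IsEqualWithTolerance(1.0f, 1.001f));       // False: both tests fail
        }
    }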
diff --git a/src/libraries/System.Numerics.Tensors/tests/TensorPrimitives.Generic.cs b/src/libraries/System.Numerics.Tensors/tests/TensorPrimitives.Generic.cs
index cd6ae2455491..9328ab9e9c05 100644
--- a/src/libraries/System.Numerics.Tensors/tests/TensorPrimitives.Generic.cs
+++ b/src/libraries/System.Numerics.Tensors/tests/TensorPrimitives.Generic.cs
@@ -113,7 +113,7 @@ namespace System.Numerics.Tensors.Tests
for (int i = 0; i < tensorLength; i++)
{
- if (!IsEqualWithTolerance(TTo.CreateTruncating(source.Span[i]), destination.Span[i]))
+ if (!Helpers.IsEqualWithTolerance(TTo.CreateTruncating(source.Span[i]), destination.Span[i]))
{
throw new XunitException($"{typeof(TFrom).Name} => {typeof(TTo).Name}. Input: {source.Span[i]}. Actual: {destination.Span[i]}. Expected: {TTo.CreateTruncating(source.Span[i])}.");
}
@@ -145,7 +145,7 @@ namespace System.Numerics.Tensors.Tests
for (int i = 0; i < tensorLength; i++)
{
- if (!IsEqualWithTolerance(TTo.CreateSaturating(source.Span[i]), destination.Span[i]))
+ if (!Helpers.IsEqualWithTolerance(TTo.CreateSaturating(source.Span[i]), destination.Span[i]))
{
throw new XunitException($"{typeof(TFrom).Name} => {typeof(TTo).Name}. Input: {source.Span[i]}. Actual: {destination.Span[i]}. Expected: {TTo.CreateSaturating(source.Span[i])}.");
}
@@ -177,7 +177,7 @@ namespace System.Numerics.Tensors.Tests
for (int i = 0; i < tensorLength; i++)
{
- if (!IsEqualWithTolerance(TTo.CreateChecked(source.Span[i]), destination.Span[i]))
+ if (!Helpers.IsEqualWithTolerance(TTo.CreateChecked(source.Span[i]), destination.Span[i]))
{
throw new XunitException($"{typeof(TFrom).Name} => {typeof(TTo).Name}. Input: {source.Span[i]}. Actual: {destination.Span[i]}. Expected: {TTo.CreateChecked(source.Span[i])}.");
}
@@ -199,7 +199,7 @@ namespace System.Numerics.Tensors.Tests
TensorPrimitives.ConvertChecked<TFrom, TTo>(source.Span, destination.Span);
foreach (TTo result in destination.Span)
{
- Assert.True(IsEqualWithTolerance(TTo.CreateChecked(valid), result));
+ Assert.True(Helpers.IsEqualWithTolerance(TTo.CreateChecked(valid), result));
}
// Test with at least one invalid
@@ -211,19 +211,6 @@ namespace System.Numerics.Tensors.Tests
}
};
}
-
- private static bool IsEqualWithTolerance<T>(T expected, T actual, T? tolerance = null) where T : unmanaged, INumber<T>
- {
- tolerance ??= T.CreateTruncating(0.0001);
-
- T diff = T.Abs(expected - actual);
- if (diff > tolerance && diff > T.Max(T.Abs(expected), T.Abs(actual)) * tolerance)
- {
- return false;
- }
-
- return true;
- }
}
// The tests for some types have been marked as OuterLoop simply to decrease inner loop testing time.
@@ -371,18 +358,27 @@ namespace System.Numerics.Tensors.Tests
yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Atanh), new Func<T, T>(T.Atanh) };
yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.AtanPi), new Func<T, T>(T.AtanPi) };
yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Atan), new Func<T, T>(T.Atan) };
- yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Cbrt), new Func<T, T>(T.Cbrt) };
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Cbrt), new Func<T, T>(T.Cbrt), Helpers.DetermineTolerance<T>(doubleTolerance: 1e-13) };
yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Ceiling), new Func<T, T>(T.Ceiling) };
- yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Cos), new Func<T, T>(T.Cos) };
- yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Cosh), new Func<T, T>(T.Cosh) };
- yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.CosPi), new Func<T, T>(T.CosPi) };
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Cos), new Func<T, T>(T.Cos), Helpers.DetermineTolerance<T>(doubleTolerance: 1e-4, floatTolerance: 1e-4f) };
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Cosh), new Func<T, T>(T.Cosh), Helpers.DetermineTolerance<T>(doubleTolerance: 1e-14) };
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.CosPi), new Func<T, T>(T.CosPi), Helpers.DetermineTolerance<T>(doubleTolerance: 1e-4, floatTolerance: 1e-4f) };
yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.DegreesToRadians), new Func<T, T>(T.DegreesToRadians) };
yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Exp), new Func<T, T>(T.Exp) };
- yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Exp2), new Func<T, T>(T.Exp2) };
- yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Exp10), new Func<T, T>(T.Exp10) };
- yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.ExpM1), new Func<T, T>(T.ExpM1) };
- yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Exp2M1), new Func<T, T>(T.Exp2M1) };
- yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Exp10M1), new Func<T, T>(T.Exp10M1) };
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Exp2), new Func<T, T>(T.Exp2), Helpers.DetermineTolerance<T>(doubleTolerance: 1e-6, floatTolerance: 1e-5f) };
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Exp10), new Func<T, T>(T.Exp10), Helpers.DetermineTolerance<T>(doubleTolerance: 1e-6, floatTolerance: 1e-5f) };
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.ExpM1), new Func<T, T>(T.ExpM1), Helpers.DetermineTolerance<T>(doubleTolerance: 1e-6) };
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Exp2M1), new Func<T, T>(T.Exp2M1), Helpers.DetermineTolerance<T>(doubleTolerance: 1e-6, floatTolerance: 1e-5f) };
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Exp10M1), new Func<T, T>(T.Exp10M1), Helpers.DetermineTolerance<T>(doubleTolerance: 1e-6, floatTolerance: 1e-5f) };
yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Floor), new Func<T, T>(T.Floor) };
yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Log), new Func<T, T>(T.Log) };
yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Log2), new Func<T, T>(T.Log2) };
@@ -392,15 +388,19 @@ namespace System.Numerics.Tensors.Tests
yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Log10P1), new Func<T, T>(T.Log10P1) };
yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.RadiansToDegrees), new Func<T, T>(T.RadiansToDegrees) };
yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Reciprocal), new Func<T, T>(f => T.One / f) };
- yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.ReciprocalEstimate), new Func<T, T>(T.ReciprocalEstimate), T.CreateTruncating(1.171875e-02) };
+ yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.ReciprocalEstimate), new Func<T, T>(T.ReciprocalEstimate), T.CreateTruncating(Helpers.DefaultToleranceForEstimates) };
yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.ReciprocalSqrt), new Func<T, T>(f => T.One / T.Sqrt(f)) };
- yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.ReciprocalSqrtEstimate), new Func<T, T>(T.ReciprocalSqrtEstimate), T.CreateTruncating(1.171875e-02) };
+ yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.ReciprocalSqrtEstimate), new Func<T, T>(T.ReciprocalSqrtEstimate), T.CreateTruncating(Helpers.DefaultToleranceForEstimates) };
yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Round), new Func<T, T>(T.Round) };
- yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Sin), new Func<T, T>(T.Sin) };
- yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Sinh), new Func<T, T>(T.Sinh) };
- yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.SinPi), new Func<T, T>(T.SinPi) };
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Sin), new Func<T, T>(T.Sin), Helpers.DetermineTolerance<T>(doubleTolerance: 1e-13, floatTolerance: 1e-4f) };
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Sinh), new Func<T, T>(T.Sinh), Helpers.DetermineTolerance<T>(doubleTolerance: 1e-14) };
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.SinPi), new Func<T, T>(T.SinPi), Helpers.DetermineTolerance<T>(doubleTolerance: 1e-13, floatTolerance: 1e-4f) };
yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Sqrt), new Func<T, T>(T.Sqrt) };
- yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Tan), new Func<T, T>(T.Tan) };
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Tan), new Func<T, T>(T.Tan), Helpers.DetermineTolerance<T>(doubleTolerance: 1e-10, floatTolerance: 1e-4f) };
yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Tanh), new Func<T, T>(T.Tanh) };
yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.TanPi), new Func<T, T>(T.TanPi) };
yield return new object[] { new SpanDestinationDelegate(TensorPrimitives.Truncate), new Func<T, T>(T.Truncate) };
@@ -516,12 +516,13 @@ namespace System.Numerics.Tensors.Tests
yield return new object[] { new SpanSpanDestinationDelegate(TensorPrimitives.Hypot), new Func<T, T, T>(T.Hypot) };
yield return new object[] { new SpanSpanDestinationDelegate(TensorPrimitives.Ieee754Remainder), new Func<T, T, T>(T.Ieee754Remainder) };
yield return new object[] { new SpanSpanDestinationDelegate(TensorPrimitives.Log), new Func<T, T, T>(T.Log) };
- yield return new object[] { new SpanSpanDestinationDelegate(TensorPrimitives.Pow), new Func<T, T, T>(T.Pow) };
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ yield return new object[] { new SpanSpanDestinationDelegate(TensorPrimitives.Pow), new Func<T, T, T>(T.Pow), Helpers.DetermineTolerance<T>(doubleTolerance: 1e-13, floatTolerance: 1e-4f) };
}
[Theory]
[MemberData(nameof(SpanSpanDestinationFunctionsToTest))]
- public void SpanSpanDestination_AllLengths(SpanSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> expectedMethod)
+ public void SpanSpanDestination_AllLengths(SpanSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> expectedMethod, T? tolerance = null)
{
Assert.All(Helpers.TensorLengthsIncluding0, tensorLength =>
{
@@ -532,14 +533,14 @@ namespace System.Numerics.Tensors.Tests
tensorPrimitivesMethod(x, y, destination);
for (int i = 0; i < tensorLength; i++)
{
- AssertEqualTolerance(expectedMethod(x[i], y[i]), destination[i]);
+ AssertEqualTolerance(expectedMethod(x[i], y[i]), destination[i], tolerance);
}
});
}
[Theory]
[MemberData(nameof(SpanSpanDestinationFunctionsToTest))]
- public void SpanSpanDestination_InPlace(SpanSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> expectedMethod)
+ public void SpanSpanDestination_InPlace(SpanSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> expectedMethod, T? tolerance = null)
{
Assert.All(Helpers.TensorLengthsIncluding0, tensorLength =>
{
@@ -550,14 +551,14 @@ namespace System.Numerics.Tensors.Tests
for (int i = 0; i < tensorLength; i++)
{
- AssertEqualTolerance(expectedMethod(xOrig[i], xOrig[i]), x[i]);
+ AssertEqualTolerance(expectedMethod(xOrig[i], xOrig[i]), x[i], tolerance);
}
});
}
[Theory]
[MemberData(nameof(SpanSpanDestinationFunctionsToTest))]
- public void SpanSpanDestination_SpecialValues(SpanSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> expectedMethod)
+ public void SpanSpanDestination_SpecialValues(SpanSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> expectedMethod, T? tolerance = null)
{
Assert.All(Helpers.TensorLengths, tensorLength =>
{
@@ -570,7 +571,7 @@ namespace System.Numerics.Tensors.Tests
tensorPrimitivesMethod(x.Span, y.Span, destination.Span);
for (int i = 0; i < tensorLength; i++)
{
- AssertEqualTolerance(expectedMethod(x[i], y[i]), destination[i]);
+ AssertEqualTolerance(expectedMethod(x[i], y[i]), destination[i], tolerance);
}
}, x);
@@ -579,7 +580,7 @@ namespace System.Numerics.Tensors.Tests
tensorPrimitivesMethod(x.Span, y.Span, destination.Span);
for (int i = 0; i < tensorLength; i++)
{
- AssertEqualTolerance(expectedMethod(x[i], y[i]), destination[i]);
+ AssertEqualTolerance(expectedMethod(x[i], y[i]), destination[i], tolerance);
}
}, y);
});
@@ -587,8 +588,11 @@ namespace System.Numerics.Tensors.Tests
[Theory]
[MemberData(nameof(SpanSpanDestinationFunctionsToTest))]
- public void SpanSpanDestination_ThrowsForMismatchedLengths(SpanSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> _)
+ public void SpanSpanDestination_ThrowsForMismatchedLengths(SpanSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> expectedMethod, T? tolerance = null)
{
+ _ = expectedMethod;
+ _ = tolerance;
+
Assert.All(Helpers.TensorLengths, tensorLength =>
{
using BoundedMemory<T> x = CreateAndFillTensor(tensorLength);
@@ -602,8 +606,11 @@ namespace System.Numerics.Tensors.Tests
[Theory]
[MemberData(nameof(SpanSpanDestinationFunctionsToTest))]
- public void SpanSpanDestination_ThrowsForTooShortDestination(SpanSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> _)
+ public void SpanSpanDestination_ThrowsForTooShortDestination(SpanSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> expectedMethod, T? tolerance = null)
{
+ _ = expectedMethod;
+ _ = tolerance;
+
Assert.All(Helpers.TensorLengths, tensorLength =>
{
using BoundedMemory<T> x = CreateAndFillTensor(tensorLength);
@@ -616,8 +623,11 @@ namespace System.Numerics.Tensors.Tests
[Theory]
[MemberData(nameof(SpanSpanDestinationFunctionsToTest))]
- public void SpanSpanDestination_ThrowsForOverlapppingInputsWithOutputs(SpanSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> _)
+ public void SpanSpanDestination_ThrowsForOverlapppingInputsWithOutputs(SpanSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> expectedMethod, T? tolerance = null)
{
+ _ = expectedMethod;
+ _ = tolerance;
+
T[] array = new T[10];
AssertExtensions.Throws<ArgumentException>("destination", () => tensorPrimitivesMethod(array.AsSpan(1, 2), array.AsSpan(5, 2), array.AsSpan(0, 2)));
AssertExtensions.Throws<ArgumentException>("destination", () => tensorPrimitivesMethod(array.AsSpan(1, 2), array.AsSpan(5, 2), array.AsSpan(2, 2)));
@@ -633,7 +643,8 @@ namespace System.Numerics.Tensors.Tests
yield return new object[] { new SpanScalarDestinationDelegate<T, T, T>(TensorPrimitives.Atan2Pi), new Func<T, T, T>(T.Atan2Pi) };
yield return new object[] { new SpanScalarDestinationDelegate<T, T, T>(TensorPrimitives.CopySign), new Func<T, T, T>(T.CopySign) };
yield return new object[] { new SpanScalarDestinationDelegate<T, T, T>(TensorPrimitives.Ieee754Remainder), new Func<T, T, T>(T.Ieee754Remainder) };
- yield return new object[] { new SpanScalarDestinationDelegate<T, T, T>(TensorPrimitives.Pow), new Func<T, T, T>(T.Pow) };
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ yield return new object[] { new SpanScalarDestinationDelegate<T, T, T>(TensorPrimitives.Pow), new Func<T, T, T>(T.Pow), Helpers.DetermineTolerance<T>(doubleTolerance: 1e-13, floatTolerance: 1e-4f) };
yield return new object[] { new SpanScalarDestinationDelegate<T, T, T>(TensorPrimitives.Log), new Func<T, T, T>(T.Log) };
yield return new object[] { new SpanScalarDestinationDelegate<T, T, T>(TensorPrimitives.Max), new Func<T, T, T>(T.Max) };
yield return new object[] { new SpanScalarDestinationDelegate<T, T, T>(TensorPrimitives.MaxMagnitude), new Func<T, T, T>(T.MaxMagnitude) };
@@ -643,7 +654,7 @@ namespace System.Numerics.Tensors.Tests
[Theory]
[MemberData(nameof(SpanScalarDestinationFunctionsToTest))]
- public void SpanScalarDestination_AllLengths(SpanScalarDestinationDelegate<T, T, T> tensorPrimitivesMethod, Func<T, T, T> expectedMethod)
+ public void SpanScalarDestination_AllLengths(SpanScalarDestinationDelegate<T, T, T> tensorPrimitivesMethod, Func<T, T, T> expectedMethod, T? tolerance = null)
{
Assert.All(Helpers.TensorLengthsIncluding0, tensorLength =>
{
@@ -654,14 +665,14 @@ namespace System.Numerics.Tensors.Tests
tensorPrimitivesMethod(x, y, destination);
for (int i = 0; i < tensorLength; i++)
{
- AssertEqualTolerance(expectedMethod(x[i], y), destination[i]);
+ AssertEqualTolerance(expectedMethod(x[i], y), destination[i], tolerance);
}
});
}
[Theory]
[MemberData(nameof(SpanScalarDestinationFunctionsToTest))]
- public void SpanScalarDestination_InPlace(SpanScalarDestinationDelegate<T, T, T> tensorPrimitivesMethod, Func<T, T, T> expectedMethod)
+ public void SpanScalarDestination_InPlace(SpanScalarDestinationDelegate<T, T, T> tensorPrimitivesMethod, Func<T, T, T> expectedMethod, T? tolerance = null)
{
Assert.All(Helpers.TensorLengthsIncluding0, tensorLength =>
{
@@ -673,14 +684,14 @@ namespace System.Numerics.Tensors.Tests
for (int i = 0; i < tensorLength; i++)
{
- AssertEqualTolerance(expectedMethod(xOrig[i], y), x[i]);
+ AssertEqualTolerance(expectedMethod(xOrig[i], y), x[i], tolerance);
}
});
}
[Theory]
[MemberData(nameof(SpanScalarDestinationFunctionsToTest))]
- public void SpanScalarDestination_SpecialValues(SpanScalarDestinationDelegate<T, T, T> tensorPrimitivesMethod, Func<T, T, T> expectedMethod)
+ public void SpanScalarDestination_SpecialValues(SpanScalarDestinationDelegate<T, T, T> tensorPrimitivesMethod, Func<T, T, T> expectedMethod, T? tolerance = null)
{
Assert.All(Helpers.TensorLengths, tensorLength =>
{
@@ -693,7 +704,7 @@ namespace System.Numerics.Tensors.Tests
tensorPrimitivesMethod(x.Span, y, destination.Span);
for (int i = 0; i < tensorLength; i++)
{
- AssertEqualTolerance(expectedMethod(x[i], y), destination[i]);
+ AssertEqualTolerance(expectedMethod(x[i], y), destination[i], tolerance);
}
}, x);
});
@@ -701,8 +712,11 @@ namespace System.Numerics.Tensors.Tests
[Theory]
[MemberData(nameof(SpanScalarDestinationFunctionsToTest))]
- public void SpanScalarDestination_ThrowsForTooShortDestination(SpanScalarDestinationDelegate<T, T, T> tensorPrimitivesMethod, Func<T, T, T> _)
+ public void SpanScalarDestination_ThrowsForTooShortDestination(SpanScalarDestinationDelegate<T, T, T> tensorPrimitivesMethod, Func<T, T, T> expectedMethod, T? tolerance = null)
{
+ _ = expectedMethod;
+ _ = tolerance;
+
Assert.All(Helpers.TensorLengths, tensorLength =>
{
using BoundedMemory<T> x = CreateAndFillTensor(tensorLength);
@@ -715,8 +729,11 @@ namespace System.Numerics.Tensors.Tests
[Theory]
[MemberData(nameof(SpanScalarDestinationFunctionsToTest))]
- public void SpanScalarDestination_ThrowsForOverlapppingInputsWithOutputs(SpanScalarDestinationDelegate<T, T, T> tensorPrimitivesMethod, Func<T, T, T> _)
+ public void SpanScalarDestination_ThrowsForOverlapppingInputsWithOutputs(SpanScalarDestinationDelegate<T, T, T> tensorPrimitivesMethod, Func<T, T, T> expectedMethod, T? tolerance = null)
{
+ _ = expectedMethod;
+ _ = tolerance;
+
T[] array = new T[10];
AssertExtensions.Throws<ArgumentException>("destination", () => tensorPrimitivesMethod(array.AsSpan(1, 2), default, array.AsSpan(0, 2)));
AssertExtensions.Throws<ArgumentException>("destination", () => tensorPrimitivesMethod(array.AsSpan(1, 2), default, array.AsSpan(2, 2)));
@@ -728,13 +745,14 @@ namespace System.Numerics.Tensors.Tests
{
yield return new object[] { new ScalarSpanDestinationDelegate(TensorPrimitives.Atan2), new Func<T, T, T>(T.Atan2) };
yield return new object[] { new ScalarSpanDestinationDelegate(TensorPrimitives.Atan2Pi), new Func<T, T, T>(T.Atan2Pi) };
- yield return new object[] { new ScalarSpanDestinationDelegate(TensorPrimitives.Pow), new Func<T, T, T>(T.Pow) };
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ yield return new object[] { new ScalarSpanDestinationDelegate(TensorPrimitives.Pow), new Func<T, T, T>(T.Pow), Helpers.DetermineTolerance<T>(floatTolerance: 1e-4f)};
yield return new object[] { new ScalarSpanDestinationDelegate(TensorPrimitives.Ieee754Remainder), new Func<T, T, T>(T.Ieee754Remainder) };
}
[Theory]
[MemberData(nameof(ScalarSpanFloatDestinationFunctionsToTest))]
- public void SpanScalarFloatDestination_AllLengths(ScalarSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> expectedMethod)
+ public void SpanScalarFloatDestination_AllLengths(ScalarSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> expectedMethod, T? tolerance = null)
{
Assert.All(Helpers.TensorLengthsIncluding0, tensorLength =>
{
@@ -745,14 +763,14 @@ namespace System.Numerics.Tensors.Tests
tensorPrimitivesMethod(x, y, destination);
for (int i = 0; i < tensorLength; i++)
{
- AssertEqualTolerance(expectedMethod(x, y[i]), destination[i]);
+ AssertEqualTolerance(expectedMethod(x, y[i]), destination[i], tolerance);
}
});
}
[Theory]
[MemberData(nameof(ScalarSpanFloatDestinationFunctionsToTest))]
- public void SpanScalarFloatDestination_InPlace(ScalarSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> expectedMethod)
+ public void SpanScalarFloatDestination_InPlace(ScalarSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> expectedMethod, T? tolerance = null)
{
Assert.All(Helpers.TensorLengthsIncluding0, tensorLength =>
{
@@ -764,14 +782,14 @@ namespace System.Numerics.Tensors.Tests
for (int i = 0; i < tensorLength; i++)
{
- AssertEqualTolerance(expectedMethod(x, yOrig[i]), y[i]);
+ AssertEqualTolerance(expectedMethod(x, yOrig[i]), y[i], tolerance);
}
});
}
[Theory]
[MemberData(nameof(ScalarSpanFloatDestinationFunctionsToTest))]
- public void ScalarSpanDestination_SpecialValues(ScalarSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> expectedMethod)
+ public void ScalarSpanDestination_SpecialValues(ScalarSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> expectedMethod, T? tolerance = null)
{
Assert.All(Helpers.TensorLengths, tensorLength =>
{
@@ -784,7 +802,7 @@ namespace System.Numerics.Tensors.Tests
tensorPrimitivesMethod(x, y.Span, destination.Span);
for (int i = 0; i < tensorLength; i++)
{
- AssertEqualTolerance(expectedMethod(x, y[i]), destination[i]);
+ AssertEqualTolerance(expectedMethod(x, y[i]), destination[i], tolerance);
}
}, y);
});
@@ -792,8 +810,11 @@ namespace System.Numerics.Tensors.Tests
[Theory]
[MemberData(nameof(ScalarSpanFloatDestinationFunctionsToTest))]
- public void SpanScalarFloatDestination_ThrowsForTooShortDestination(ScalarSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> _)
+ public void SpanScalarFloatDestination_ThrowsForTooShortDestination(ScalarSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> expectedMethod, T? tolerance = null)
{
+ _ = expectedMethod;
+ _ = tolerance;
+
Assert.All(Helpers.TensorLengths, tensorLength =>
{
T x = NextRandom();
@@ -806,8 +827,11 @@ namespace System.Numerics.Tensors.Tests
[Theory]
[MemberData(nameof(ScalarSpanFloatDestinationFunctionsToTest))]
- public void SpanScalarFloatDestination_ThrowsForOverlapppingInputsWithOutputs(ScalarSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> _)
+ public void SpanScalarFloatDestination_ThrowsForOverlapppingInputsWithOutputs(ScalarSpanDestinationDelegate tensorPrimitivesMethod, Func<T, T, T> expectedMethod, T? tolerance = null)
{
+ _ = expectedMethod;
+ _ = tolerance;
+
T[] array = new T[10];
AssertExtensions.Throws<ArgumentException>("destination", () => tensorPrimitivesMethod(default, array.AsSpan(1, 2), array.AsSpan(0, 2)));
AssertExtensions.Throws<ArgumentException>("destination", () => tensorPrimitivesMethod(default, array.AsSpan(1, 2), array.AsSpan(2, 2)));
@@ -817,13 +841,14 @@ namespace System.Numerics.Tensors.Tests
#region Span,Int,Span -> Destination
public static IEnumerable<object[]> SpanIntDestinationFunctionsToTest()
{
- yield return new object[] { new SpanScalarDestinationDelegate<T, int, T>(TensorPrimitives.RootN), new Func<T, int, T>(T.RootN) };
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ yield return new object[] { new SpanScalarDestinationDelegate<T, int, T>(TensorPrimitives.RootN), new Func<T, int, T>(T.RootN), Helpers.DetermineTolerance<T>(doubleTolerance: 1e-13) };
yield return new object[] { new SpanScalarDestinationDelegate<T, int, T>(TensorPrimitives.ScaleB), new Func<T, int, T>(T.ScaleB) };
}
[Theory]
[MemberData(nameof(SpanIntDestinationFunctionsToTest))]
- public void SpanIntDestination_AllLengths(SpanScalarDestinationDelegate<T, int, T> tensorPrimitivesMethod, Func<T, int, T> expectedMethod)
+ public void SpanIntDestination_AllLengths(SpanScalarDestinationDelegate<T, int, T> tensorPrimitivesMethod, Func<T, int, T> expectedMethod, T? tolerance = null)
{
Assert.All(Helpers.TensorLengthsIncluding0, tensorLength =>
{
@@ -834,14 +859,14 @@ namespace System.Numerics.Tensors.Tests
tensorPrimitivesMethod(x, y, destination);
for (int i = 0; i < tensorLength; i++)
{
- AssertEqualTolerance(expectedMethod(x[i], y), destination[i]);
+ AssertEqualTolerance(expectedMethod(x[i], y), destination[i], tolerance);
}
});
}
[Theory]
[MemberData(nameof(SpanIntDestinationFunctionsToTest))]
- public void SpanIntDestination_InPlace(SpanScalarDestinationDelegate<T, int, T> tensorPrimitivesMethod, Func<T, int, T> expectedMethod)
+ public void SpanIntDestination_InPlace(SpanScalarDestinationDelegate<T, int, T> tensorPrimitivesMethod, Func<T, int, T> expectedMethod, T? tolerance = null)
{
Assert.All(Helpers.TensorLengthsIncluding0, tensorLength =>
{
@@ -853,14 +878,14 @@ namespace System.Numerics.Tensors.Tests
for (int i = 0; i < tensorLength; i++)
{
- AssertEqualTolerance(expectedMethod(xOrig[i], y), x[i]);
+ AssertEqualTolerance(expectedMethod(xOrig[i], y), x[i], tolerance);
}
});
}
[Theory]
[MemberData(nameof(SpanIntDestinationFunctionsToTest))]
- public void SpanIntDestination_SpecialValues(SpanScalarDestinationDelegate<T, int, T> tensorPrimitivesMethod, Func<T, int, T> expectedMethod)
+ public void SpanIntDestination_SpecialValues(SpanScalarDestinationDelegate<T, int, T> tensorPrimitivesMethod, Func<T, int, T> expectedMethod, T? tolerance = null)
{
Assert.All(Helpers.TensorLengths, tensorLength =>
{
@@ -873,7 +898,7 @@ namespace System.Numerics.Tensors.Tests
tensorPrimitivesMethod(x.Span, y, destination.Span);
for (int i = 0; i < tensorLength; i++)
{
- AssertEqualTolerance(expectedMethod(x[i], y), destination[i]);
+ AssertEqualTolerance(expectedMethod(x[i], y), destination[i], tolerance);
}
}, x);
});
@@ -881,8 +906,11 @@ namespace System.Numerics.Tensors.Tests
[Theory]
[MemberData(nameof(SpanIntDestinationFunctionsToTest))]
- public void SpanIntDestination_ThrowsForTooShortDestination(SpanScalarDestinationDelegate<T, int, T> tensorPrimitivesMethod, Func<T, int, T> _)
+ public void SpanIntDestination_ThrowsForTooShortDestination(SpanScalarDestinationDelegate<T, int, T> tensorPrimitivesMethod, Func<T, int, T> expectedMethod, T? tolerance = null)
{
+ _ = expectedMethod;
+ _ = tolerance;
+
Assert.All(Helpers.TensorLengths, tensorLength =>
{
using BoundedMemory<T> x = CreateAndFillTensor(tensorLength);
@@ -895,8 +923,11 @@ namespace System.Numerics.Tensors.Tests
[Theory]
[MemberData(nameof(SpanIntDestinationFunctionsToTest))]
- public void SpanIntDestination_ThrowsForOverlapppingInputsWithOutputs(SpanScalarDestinationDelegate<T, int, T> tensorPrimitivesMethod, Func<T, int, T> _)
+ public void SpanIntDestination_ThrowsForOverlapppingInputsWithOutputs(SpanScalarDestinationDelegate<T, int, T> tensorPrimitivesMethod, Func<T, int, T> expectedMethod, T? tolerance = null)
{
+ _ = expectedMethod;
+ _ = tolerance;
+
T[] array = new T[10];
AssertExtensions.Throws<ArgumentException>("destination", () => tensorPrimitivesMethod(array.AsSpan(1, 2), 2, array.AsSpan(0, 2)));
AssertExtensions.Throws<ArgumentException>("destination", () => tensorPrimitivesMethod(array.AsSpan(1, 2), 2, array.AsSpan(2, 2)));
@@ -2071,10 +2102,7 @@ namespace System.Numerics.Tensors.Tests
protected override void AssertEqualTolerance(T expected, T actual, T? tolerance = null)
{
- tolerance ??= T.CreateTruncating(0.0001);
-
- T diff = T.Abs(expected - actual);
- if (diff > tolerance && diff > T.Max(T.Abs(expected), T.Abs(actual)) * tolerance)
+ if (!Helpers.IsEqualWithTolerance(expected, actual, tolerance))
{
throw EqualException.ForMismatchedValues(expected, actual);
}
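The per-row tolerances work because xUnit theories accept data rows shorter than the parameter list when the trailing parameters have defaults; rows that omit the tolerance leave it null. A minimal hedged sketch of that mechanism (hypothetical test, assuming xUnit's optional-parameter support for theory data):

    using Xunit;

    public class OptionalTheoryParameterSketch
    {
        [Theory]
        [InlineData(1, 2)]    // tolerance omitted: filled from the parameter default
        [InlineData(1, 2, 0)] // tolerance supplied explicitly
        public void Add_IsCommutative(int a, int b, int? tolerance = null)
        {
            _ = tolerance; // unused here, mirroring the Throws* tests above
            Assert.Equal(a + b, b + a);
        }
    }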
diff --git a/src/libraries/System.Numerics.Tensors/tests/TensorPrimitives.NonGeneric.Single.cs b/src/libraries/System.Numerics.Tensors/tests/TensorPrimitives.NonGeneric.Single.cs
index ac883851299d..d6df8365c59d 100644
--- a/src/libraries/System.Numerics.Tensors/tests/TensorPrimitives.NonGeneric.Single.cs
+++ b/src/libraries/System.Numerics.Tensors/tests/TensorPrimitives.NonGeneric.Single.cs
@@ -106,10 +106,7 @@ namespace System.Numerics.Tensors.Tests
protected override void AssertEqualTolerance(float expected, float actual, float? tolerance = null)
{
- tolerance ??= 0.0001f;
-
- double diff = Math.Abs((double)expected - (double)actual);
- if (diff > tolerance && diff > Math.Max(Math.Abs(expected), Math.Abs(actual)) * tolerance)
+ if (!Helpers.IsEqualWithTolerance(expected, actual, tolerance))
{
throw EqualException.ForMismatchedValues(expected, actual);
}
diff --git a/src/libraries/System.Numerics.Tensors/tests/TensorPrimitivesTests.cs b/src/libraries/System.Numerics.Tensors/tests/TensorPrimitivesTests.cs
index b0b44fddb0ae..173e57649cf8 100644
--- a/src/libraries/System.Numerics.Tensors/tests/TensorPrimitivesTests.cs
+++ b/src/libraries/System.Numerics.Tensors/tests/TensorPrimitivesTests.cs
@@ -663,6 +663,9 @@ namespace System.Numerics.Tensors.Tests
{
if (!IsFloatingPoint) return;
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ T? tolerance = Helpers.DetermineTolerance<T>(doubleTolerance: 1e-14);
+
Assert.All(VectorLengthAndIteratedRange(ConvertFromSingle(-100f), ConvertFromSingle(100f), ConvertFromSingle(3f)), arg =>
{
T[] x = new T[arg.Length];
@@ -674,7 +677,7 @@ namespace System.Numerics.Tensors.Tests
T expected = Cosh(arg.Element);
foreach (T actual in dest)
{
- AssertEqualTolerance(expected, actual);
+ AssertEqualTolerance(expected, actual, tolerance);
}
});
}
@@ -952,6 +955,9 @@ namespace System.Numerics.Tensors.Tests
[Fact]
public void Dot_AllLengths()
{
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ T? tolerance = Helpers.DetermineTolerance<T>(doubleTolerance: 1e-14f, floatTolerance: 1e-3f);
+
Assert.All(Helpers.TensorLengthsIncluding0, tensorLength =>
{
using BoundedMemory<T> x = CreateAndFillTensor(tensorLength);
@@ -963,7 +969,7 @@ namespace System.Numerics.Tensors.Tests
dot = Add(dot, Multiply(x[i], y[i]));
}
- AssertEqualTolerance(dot, Dot(x, y));
+ AssertEqualTolerance(dot, Dot(x, y), tolerance);
});
}
#endregion
@@ -2879,6 +2885,9 @@ namespace System.Numerics.Tensors.Tests
{
if (!IsFloatingPoint) return;
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ T? tolerance = Helpers.DetermineTolerance<T>(doubleTolerance: 1e-14);
+
Assert.All(VectorLengthAndIteratedRange(ConvertFromSingle(-100f), ConvertFromSingle(100f), ConvertFromSingle(3f)), args =>
{
T[] x = new T[args.Length];
@@ -2890,7 +2899,7 @@ namespace System.Numerics.Tensors.Tests
T expected = Sinh(args.Element);
foreach (T actual in dest)
{
- AssertEqualTolerance(expected, actual);
+ AssertEqualTolerance(expected, actual, tolerance);
}
});
}
@@ -3139,6 +3148,9 @@ namespace System.Numerics.Tensors.Tests
[Fact]
public void Sum_AllLengths()
{
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ T? tolerance = Helpers.DetermineTolerance<T>(doubleTolerance: 1e-13, floatTolerance: 1e-5f);
+
Assert.All(Helpers.TensorLengths, tensorLength =>
{
using BoundedMemory<T> x = CreateAndFillTensor(tensorLength);
@@ -3148,7 +3160,7 @@ namespace System.Numerics.Tensors.Tests
{
sum = Add(sum, value);
}
- AssertEqualTolerance(sum, Sum(x));
+ AssertEqualTolerance(sum, Sum(x), tolerance);
});
}
#endregion
@@ -3157,6 +3169,9 @@ namespace System.Numerics.Tensors.Tests
[Fact]
public void SumOfMagnitudes_AllLengths()
{
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ T? tolerance = Helpers.DetermineTolerance<T>(doubleTolerance: 1e-12, floatTolerance: 1e-6f);
+
Assert.All(Helpers.TensorLengths, tensorLength =>
{
using BoundedMemory<T> x = CreateTensor(tensorLength);
@@ -3167,7 +3182,7 @@ namespace System.Numerics.Tensors.Tests
{
sum = Add(sum, Abs(value));
}
- AssertEqualTolerance(sum, SumOfMagnitudes(x));
+ AssertEqualTolerance(sum, SumOfMagnitudes(x), tolerance);
});
}
#endregion
@@ -3176,6 +3191,9 @@ namespace System.Numerics.Tensors.Tests
[Fact]
public void SumOfSquares_AllLengths()
{
+ // TODO https://github.com/dotnet/runtime/issues/98861
+ T? tolerance = Helpers.DetermineTolerance<T>(doubleTolerance: 1e-12, floatTolerance: 1e-6f);
+
Assert.All(Helpers.TensorLengths, tensorLength =>
{
using BoundedMemory<T> x = CreateAndFillTensor(tensorLength);
@@ -3185,7 +3203,7 @@ namespace System.Numerics.Tensors.Tests
{
sum = Add(sum, Multiply(value, value));
}
- AssertEqualTolerance(sum, SumOfSquares(x));
+ AssertEqualTolerance(sum, SumOfSquares(x), tolerance);
});
}
#endregion
diff --git a/src/libraries/System.Private.CoreLib/src/System.Private.CoreLib.Shared.projitems b/src/libraries/System.Private.CoreLib/src/System.Private.CoreLib.Shared.projitems
index a73e8247a58e..6375de34309e 100644
--- a/src/libraries/System.Private.CoreLib/src/System.Private.CoreLib.Shared.projitems
+++ b/src/libraries/System.Private.CoreLib/src/System.Private.CoreLib.Shared.projitems
@@ -1084,6 +1084,7 @@
<Compile Include="$(MSBuildThisFileDirectory)System\SByte.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\Security\AllowPartiallyTrustedCallersAttribute.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\Security\CryptographicException.cs" />
+ <Compile Include="$(MSBuildThisFileDirectory)System\Security\DynamicSecurityMethodAttribute.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\Security\IPermission.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\Security\ISecurityEncodable.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\Security\IStackWalk.cs" />
@@ -1118,6 +1119,7 @@
<Compile Include="$(MSBuildThisFileDirectory)System\SpanDebugView.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\SpanHelpers.BinarySearch.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\SpanHelpers.Byte.cs" />
+ <Compile Include="$(MSBuildThisFileDirectory)System\SpanHelpers.ByteMemOps.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\SpanHelpers.Char.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\SpanHelpers.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\SpanHelpers.Packed.cs" />
@@ -2140,7 +2142,6 @@
<Compile Include="$(MSBuildThisFileDirectory)Microsoft\Win32\SafeHandles\SafeRegistryHandle.cs" />
<Compile Include="$(MSBuildThisFileDirectory)Microsoft\Win32\SafeHandles\SafeThreadPoolIOHandle.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\AppDomain.Windows.cs" />
- <Compile Include="$(MSBuildThisFileDirectory)System\Buffer.Windows.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\DateTime.Windows.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\Environment.Win32.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\Environment.Windows.cs" />
@@ -2458,7 +2459,6 @@
<Compile Include="$(MSBuildThisFileDirectory)Microsoft\Win32\SafeHandles\SafeFileHandle.ThreadPoolValueTaskSource.cs" />
<Compile Include="$(MSBuildThisFileDirectory)Microsoft\Win32\SafeHandles\SafeFileHandle.Unix.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\AppDomain.Unix.cs" />
- <Compile Include="$(MSBuildThisFileDirectory)System\Buffer.Unix.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\DateTime.Unix.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\Diagnostics\DebugProvider.Unix.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\Diagnostics\Stopwatch.Unix.cs" />
diff --git a/src/libraries/System.Private.CoreLib/src/System/Array.cs b/src/libraries/System.Private.CoreLib/src/System/Array.cs
index c21caa8cc1d7..84bd5ed20eed 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Array.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Array.cs
@@ -58,7 +58,7 @@ namespace System
// actually of type U[], where U:T; or that an int[] <-> uint[] or
// similar cast has occurred. In any case, since it's always legal
// to reinterpret U as T in this scenario (but not necessarily the
- // other way around), we can use Buffer.Memmove here.
+ // other way around), we can use SpanHelpers.Memmove here.
T[] newArray = new T[newSize];
Buffer.Memmove(
@@ -377,7 +377,7 @@ namespace System
if (pMT->ContainsGCPointers)
Buffer.BulkMoveWithWriteBarrier(ref dst, ref src, byteCount);
else
- Buffer.Memmove(ref dst, ref src, byteCount);
+ SpanHelpers.Memmove(ref dst, ref src, byteCount);
// GC.KeepAlive(sourceArray) not required. pMT kept alive via sourceArray
return;
@@ -408,7 +408,7 @@ namespace System
if (pMT->ContainsGCPointers)
Buffer.BulkMoveWithWriteBarrier(ref dst, ref src, byteCount);
else
- Buffer.Memmove(ref dst, ref src, byteCount);
+ SpanHelpers.Memmove(ref dst, ref src, byteCount);
// GC.KeepAlive(sourceArray) not required. pMT kept alive via sourceArray
return;
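The covariance the updated comment refers to is observable from plain C#: a T[] variable may hold a U[] with U derived from T, reads as T are always safe, and only incompatible writes fault. A short illustration:

    using System;

    class ArrayCovarianceSketch
    {
        static void Main()
        {
            string[] strings = { "a", "b" };
            object[] objects = strings; // legal: arrays are covariant

            // Reading through the base type is always safe...
            Console.WriteLine(objects[0]);

            // ...but storing an incompatible type throws at run time, which is
            // why reinterpreting U[] as T[] during a copy is the safe direction.
            try { objects[0] = new object(); }
            catch (ArrayTypeMismatchException) { Console.WriteLine("write blocked"); }
        }
    }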
diff --git a/src/libraries/System.Private.CoreLib/src/System/Buffer.Unix.cs b/src/libraries/System.Private.CoreLib/src/System/Buffer.Unix.cs
deleted file mode 100644
index 008bc9310a24..000000000000
--- a/src/libraries/System.Private.CoreLib/src/System/Buffer.Unix.cs
+++ /dev/null
@@ -1,19 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-namespace System
-{
- public static partial class Buffer
- {
-#if TARGET_ARM64 || TARGET_LOONGARCH64
- // Managed code is currently faster than glibc unoptimized memmove
- // TODO-ARM64-UNIX-OPT revisit when glibc optimized memmove is in Linux distros
- // https://github.com/dotnet/runtime/issues/8897
- private static nuint MemmoveNativeThreshold => nuint.MaxValue;
-#elif TARGET_ARM
- private const nuint MemmoveNativeThreshold = 512;
-#else
- private const nuint MemmoveNativeThreshold = 2048;
-#endif
- }
-}
diff --git a/src/libraries/System.Private.CoreLib/src/System/Buffer.Windows.cs b/src/libraries/System.Private.CoreLib/src/System/Buffer.Windows.cs
deleted file mode 100644
index 4dea08790b91..000000000000
--- a/src/libraries/System.Private.CoreLib/src/System/Buffer.Windows.cs
+++ /dev/null
@@ -1,16 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-namespace System
-{
- public static partial class Buffer
- {
-#if TARGET_ARM64
- // Determine optimal value for Windows.
- // https://github.com/dotnet/runtime/issues/8896
- private static nuint MemmoveNativeThreshold => nuint.MaxValue;
-#else
- private const nuint MemmoveNativeThreshold = 2048;
-#endif
- }
-}
diff --git a/src/libraries/System.Private.CoreLib/src/System/Buffer.cs b/src/libraries/System.Private.CoreLib/src/System/Buffer.cs
index 51ec733aaef5..24f8794d852a 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Buffer.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Buffer.cs
@@ -1,14 +1,9 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
-#if TARGET_AMD64 || TARGET_ARM64 || (TARGET_32BIT && !TARGET_ARM) || TARGET_LOONGARCH64
-#define HAS_CUSTOM_BLOCKS
-#endif
-
using System.Diagnostics;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
-using System.Runtime.Intrinsics;
namespace System
{
@@ -128,227 +123,16 @@ namespace System
Memmove(ref *(byte*)destination, ref *(byte*)source, checked((nuint)sourceBytesToCopy));
}
- [Intrinsic] // Unrolled for small constant lengths
- internal static unsafe void Memmove(ref byte dest, ref byte src, nuint len)
- {
- // P/Invoke into the native version when the buffers are overlapping.
- if (((nuint)(nint)Unsafe.ByteOffset(ref src, ref dest) < len) || ((nuint)(nint)Unsafe.ByteOffset(ref dest, ref src) < len))
- {
- goto BuffersOverlap;
- }
-
- // Use "(IntPtr)(nint)len" to avoid overflow checking on the explicit cast to IntPtr
-
- ref byte srcEnd = ref Unsafe.Add(ref src, (IntPtr)(nint)len);
- ref byte destEnd = ref Unsafe.Add(ref dest, (IntPtr)(nint)len);
-
- if (len <= 16)
- goto MCPY02;
- if (len > 64)
- goto MCPY05;
-
- MCPY00:
- // Copy bytes which are multiples of 16 and leave the remainder for MCPY01 to handle.
- Debug.Assert(len > 16 && len <= 64);
-#if HAS_CUSTOM_BLOCKS
- Unsafe.As<byte, Block16>(ref dest) = Unsafe.As<byte, Block16>(ref src); // [0,16]
-#elif TARGET_64BIT
- Unsafe.As<byte, long>(ref dest) = Unsafe.As<byte, long>(ref src);
- Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 8)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 8)); // [0,16]
-#else
- Unsafe.As<byte, int>(ref dest) = Unsafe.As<byte, int>(ref src);
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 4)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 4));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 8)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 8));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 12)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 12)); // [0,16]
-#endif
- if (len <= 32)
- goto MCPY01;
-#if HAS_CUSTOM_BLOCKS
- Unsafe.As<byte, Block16>(ref Unsafe.Add(ref dest, 16)) = Unsafe.As<byte, Block16>(ref Unsafe.Add(ref src, 16)); // [0,32]
-#elif TARGET_64BIT
- Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 16)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 16));
- Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 24)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 24)); // [0,32]
-#else
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 16)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 16));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 20)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 20));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 24)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 24));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 28)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 28)); // [0,32]
-#endif
- if (len <= 48)
- goto MCPY01;
-#if HAS_CUSTOM_BLOCKS
- Unsafe.As<byte, Block16>(ref Unsafe.Add(ref dest, 32)) = Unsafe.As<byte, Block16>(ref Unsafe.Add(ref src, 32)); // [0,48]
-#elif TARGET_64BIT
- Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 32)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 32));
- Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 40)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 40)); // [0,48]
-#else
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 32)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 32));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 36)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 36));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 40)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 40));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 44)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 44)); // [0,48]
-#endif
-
- MCPY01:
- // Unconditionally copy the last 16 bytes using destEnd and srcEnd and return.
- Debug.Assert(len > 16 && len <= 64);
-#if HAS_CUSTOM_BLOCKS
- Unsafe.As<byte, Block16>(ref Unsafe.Add(ref destEnd, -16)) = Unsafe.As<byte, Block16>(ref Unsafe.Add(ref srcEnd, -16));
-#elif TARGET_64BIT
- Unsafe.As<byte, long>(ref Unsafe.Add(ref destEnd, -16)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref srcEnd, -16));
- Unsafe.As<byte, long>(ref Unsafe.Add(ref destEnd, -8)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref srcEnd, -8));
-#else
- Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -16)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -16));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -12)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -12));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -8)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -8));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -4)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -4));
-#endif
- return;
-
- MCPY02:
- // Copy the first 8 bytes and then unconditionally copy the last 8 bytes and return.
- if ((len & 24) == 0)
- goto MCPY03;
- Debug.Assert(len >= 8 && len <= 16);
-#if TARGET_64BIT
- Unsafe.As<byte, long>(ref dest) = Unsafe.As<byte, long>(ref src);
- Unsafe.As<byte, long>(ref Unsafe.Add(ref destEnd, -8)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref srcEnd, -8));
-#else
- Unsafe.As<byte, int>(ref dest) = Unsafe.As<byte, int>(ref src);
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 4)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 4));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -8)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -8));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -4)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -4));
-#endif
- return;
-
- MCPY03:
- // Copy the first 4 bytes and then unconditionally copy the last 4 bytes and return.
- if ((len & 4) == 0)
- goto MCPY04;
- Debug.Assert(len >= 4 && len < 8);
- Unsafe.As<byte, int>(ref dest) = Unsafe.As<byte, int>(ref src);
- Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -4)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -4));
- return;
-
- MCPY04:
- // Copy the first byte. For pending bytes, do an unconditionally copy of the last 2 bytes and return.
- Debug.Assert(len < 4);
- if (len == 0)
- return;
- dest = src;
- if ((len & 2) == 0)
- return;
- Unsafe.As<byte, short>(ref Unsafe.Add(ref destEnd, -2)) = Unsafe.As<byte, short>(ref Unsafe.Add(ref srcEnd, -2));
- return;
-
- MCPY05:
- // PInvoke to the native version when the copy length exceeds the threshold.
- if (len > MemmoveNativeThreshold)
- {
- goto PInvoke;
- }
-
-#if HAS_CUSTOM_BLOCKS
- if (len >= 256)
- {
- // Try to opportunistically align the destination below. The input isn't pinned, so the GC
- // is free to move the references. We're therefore assuming that reads may still be unaligned.
- //
- // dest is more important to align than src because an unaligned store is more expensive
- // than an unaligned load.
- nuint misalignedElements = 64 - (nuint)Unsafe.AsPointer(ref dest) & 63;
- Unsafe.As<byte, Block64>(ref dest) = Unsafe.As<byte, Block64>(ref src);
- src = ref Unsafe.Add(ref src, misalignedElements);
- dest = ref Unsafe.Add(ref dest, misalignedElements);
- len -= misalignedElements;
- }
-#endif
-
- // Copy 64-bytes at a time until the remainder is less than 64.
- // If remainder is greater than 16 bytes, then jump to MCPY00. Otherwise, unconditionally copy the last 16 bytes and return.
- Debug.Assert(len > 64 && len <= MemmoveNativeThreshold);
- nuint n = len >> 6;
-
- MCPY06:
-#if HAS_CUSTOM_BLOCKS
- Unsafe.As<byte, Block64>(ref dest) = Unsafe.As<byte, Block64>(ref src);
-#elif TARGET_64BIT
- Unsafe.As<byte, long>(ref dest) = Unsafe.As<byte, long>(ref src);
- Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 8)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 8));
- Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 16)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 16));
- Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 24)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 24));
- Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 32)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 32));
- Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 40)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 40));
- Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 48)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 48));
- Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 56)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 56));
-#else
- Unsafe.As<byte, int>(ref dest) = Unsafe.As<byte, int>(ref src);
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 4)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 4));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 8)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 8));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 12)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 12));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 16)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 16));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 20)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 20));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 24)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 24));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 28)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 28));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 32)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 32));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 36)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 36));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 40)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 40));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 44)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 44));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 48)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 48));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 52)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 52));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 56)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 56));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 60)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 60));
-#endif
- dest = ref Unsafe.Add(ref dest, 64);
- src = ref Unsafe.Add(ref src, 64);
- n--;
- if (n != 0)
- goto MCPY06;
-
- len %= 64;
- if (len > 16)
- goto MCPY00;
-#if HAS_CUSTOM_BLOCKS
- Unsafe.As<byte, Block16>(ref Unsafe.Add(ref destEnd, -16)) = Unsafe.As<byte, Block16>(ref Unsafe.Add(ref srcEnd, -16));
-#elif TARGET_64BIT
- Unsafe.As<byte, long>(ref Unsafe.Add(ref destEnd, -16)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref srcEnd, -16));
- Unsafe.As<byte, long>(ref Unsafe.Add(ref destEnd, -8)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref srcEnd, -8));
-#else
- Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -16)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -16));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -12)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -12));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -8)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -8));
- Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -4)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -4));
-#endif
- return;
-
- BuffersOverlap:
- // If the buffers overlap perfectly, there's no point to copying the data.
- if (Unsafe.AreSame(ref dest, ref src))
- {
- return;
- }
-
- PInvoke:
- _Memmove(ref dest, ref src, len);
- }
-
// Non-inlinable wrapper around the QCall that avoids polluting the fast path
// with P/Invoke prolog/epilog.
[MethodImpl(MethodImplOptions.NoInlining)]
- private static unsafe void _Memmove(ref byte dest, ref byte src, nuint len)
+ internal static unsafe void _Memmove(ref byte dest, ref byte src, nuint len)
{
fixed (byte* pDest = &dest)
fixed (byte* pSrc = &src)
__Memmove(pDest, pSrc, len);
}
-#if HAS_CUSTOM_BLOCKS
- [StructLayout(LayoutKind.Sequential, Size = 16)]
- private struct Block16 { }
-
- [StructLayout(LayoutKind.Sequential, Size = 64)]
- private struct Block64 { }
-#endif // HAS_CUSTOM_BLOCKS
-
// Non-inlinable wrapper around the QCall that avoids polluting the fast path
// with P/Invoke prolog/epilog.
[MethodImpl(MethodImplOptions.NoInlining)]
@@ -370,7 +154,7 @@ namespace System
if (!RuntimeHelpers.IsReferenceOrContainsReferences<T>())
{
// Blittable memmove
- Memmove(
+ SpanHelpers.Memmove(
ref Unsafe.As<T, byte>(ref destination),
ref Unsafe.As<T, byte>(ref source),
elementCount * (nuint)sizeof(T));
@@ -401,7 +185,6 @@ namespace System
_BulkMoveWithWriteBarrier(ref destination, ref source, byteCount);
}
-#pragma warning disable IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228
// Non-inlinable wrapper around the loop for copying large blocks in chunks
[MethodImpl(MethodImplOptions.NoInlining)]
private static void _BulkMoveWithWriteBarrier(ref byte destination, ref byte source, nuint byteCount)
@@ -436,7 +219,6 @@ namespace System
}
__BulkMoveWithWriteBarrier(ref destination, ref source, byteCount);
}
-#pragma warning restore IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228
#endif // !MONO
}
diff --git a/src/libraries/System.Private.CoreLib/src/System/DateTime.cs b/src/libraries/System.Private.CoreLib/src/System/DateTime.cs
index efcf7155c0f1..3eeaaabbd358 100644
--- a/src/libraries/System.Private.CoreLib/src/System/DateTime.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/DateTime.cs
@@ -332,6 +332,7 @@ namespace System
else
{
// if we have a leap second, then we adjust it to 59 so that DateTime will consider it the last in the specified minute.
+ // codeql[cs/leap-year/unsafe-date-construction-from-two-elements] - DateTime is constructed using the user specified values, not a combination of different sources. It would be intentional to throw if an invalid combination occurred.
this = new DateTime(year, month, day, hour, minute, 59);
ValidateLeapSecond();
}
diff --git a/src/libraries/System.Private.CoreLib/src/System/Globalization/GregorianCalendarHelper.cs b/src/libraries/System.Private.CoreLib/src/System/Globalization/GregorianCalendarHelper.cs
index b100b633e9ac..04298c12e7f6 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Globalization/GregorianCalendarHelper.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Globalization/GregorianCalendarHelper.cs
@@ -35,6 +35,7 @@ namespace System.Globalization
this.yearOffset = yearOffset;
this.minEraYear = minEraYear;
this.maxEraYear = maxEraYear;
+ // codeql[cs/leap-year/unsafe-date-construction-from-two-elements] - A DateTime object is created using values obtained from the machine configuration.
this.ticks = new DateTime(startYear, startMonth, startDay).Ticks;
this.eraName = eraName;
this.abbrevEraName = abbrevEraName;
diff --git a/src/libraries/System.Private.CoreLib/src/System/Globalization/TimeSpanFormat.cs b/src/libraries/System.Private.CoreLib/src/System/Globalization/TimeSpanFormat.cs
index 4e11f14c8090..8c8bea12bbf6 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Globalization/TimeSpanFormat.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Globalization/TimeSpanFormat.cs
@@ -268,12 +268,11 @@ namespace System.Globalization
// Write fraction and separator, if necessary
if (fractionDigits != 0)
{
- Debug.Assert(format == StandardFormat.C || decimalSeparator != null);
if (format == StandardFormat.C)
{
*p++ = TChar.CastFrom('.');
}
- else if (decimalSeparator!.Length == 1)
+ else if (decimalSeparator.Length == 1)
{
*p++ = decimalSeparator[0];
}
diff --git a/src/libraries/System.Private.CoreLib/src/System/IO/UnmanagedMemoryStream.cs b/src/libraries/System.Private.CoreLib/src/System/IO/UnmanagedMemoryStream.cs
index b1b18a2c3431..68adbf72bc6b 100644
--- a/src/libraries/System.Private.CoreLib/src/System/IO/UnmanagedMemoryStream.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/IO/UnmanagedMemoryStream.cs
@@ -390,7 +390,7 @@ namespace System.IO
try
{
_buffer.AcquirePointer(ref pointer);
- Buffer.Memmove(ref MemoryMarshal.GetReference(buffer), ref *(pointer + pos + _offset), (nuint)nInt);
+ SpanHelpers.Memmove(ref MemoryMarshal.GetReference(buffer), ref *(pointer + pos + _offset), (nuint)nInt);
}
finally
{
@@ -402,7 +402,7 @@ namespace System.IO
}
else
{
- Buffer.Memmove(ref MemoryMarshal.GetReference(buffer), ref *(_mem + pos), (nuint)nInt);
+ SpanHelpers.Memmove(ref MemoryMarshal.GetReference(buffer), ref *(_mem + pos), (nuint)nInt);
}
}
@@ -669,7 +669,7 @@ namespace System.IO
try
{
_buffer.AcquirePointer(ref pointer);
- Buffer.Memmove(ref *(pointer + pos + _offset), ref MemoryMarshal.GetReference(buffer), (nuint)buffer.Length);
+ SpanHelpers.Memmove(ref *(pointer + pos + _offset), ref MemoryMarshal.GetReference(buffer), (nuint)buffer.Length);
}
finally
{
@@ -681,7 +681,7 @@ namespace System.IO
}
else
{
- Buffer.Memmove(ref *(_mem + pos), ref MemoryMarshal.GetReference(buffer), (nuint)buffer.Length);
+ SpanHelpers.Memmove(ref *(_mem + pos), ref MemoryMarshal.GetReference(buffer), (nuint)buffer.Length);
}
_position = n;
diff --git a/src/libraries/System.Private.CoreLib/src/System/Number.Formatting.cs b/src/libraries/System.Private.CoreLib/src/System/Number.Formatting.cs
index 3bb61ebcf736..c88840367773 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Number.Formatting.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Number.Formatting.cs
@@ -3498,7 +3498,6 @@ namespace System
{
if (groupDigits != null)
{
- Debug.Assert(sGroup != null, "Must be null when groupDigits != null");
int groupSizeIndex = 0; // Index into the groupDigits array.
int bufferSize = digPos; // The length of the result buffer string.
int groupSize = 0; // The current group size.
@@ -3583,7 +3582,6 @@ namespace System
if (nMaxDigits > 0)
{
- Debug.Assert(sDecimal != null);
vlb.Append(sDecimal);
if ((digPos < 0) && (nMaxDigits > 0))
{
diff --git a/src/libraries/System.Private.CoreLib/src/System/Numerics/Vector_1.cs b/src/libraries/System.Private.CoreLib/src/System/Numerics/Vector_1.cs
index efedc7f7d18d..4ad327aebc9e 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Numerics/Vector_1.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Numerics/Vector_1.cs
@@ -181,6 +181,7 @@ namespace System.Numerics
/// <returns><c>true</c> if <typeparamref name="T" /> is supported; otherwise, <c>false</c>.</returns>
public static bool IsSupported
{
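+            // Marked [Intrinsic] below so the JIT can evaluate IsSupported to a constant for
+            // each instantiation of T and eliminate the unsupported branches.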
+ [Intrinsic]
[MethodImpl(MethodImplOptions.AggressiveInlining)]
get => (typeof(T) == typeof(byte)) ||
(typeof(T) == typeof(double)) ||
diff --git a/src/libraries/System.Private.CoreLib/src/System/ReadOnlySpan.cs b/src/libraries/System.Private.CoreLib/src/System/ReadOnlySpan.cs
index ffef2df0989b..b182d42b66ec 100644
--- a/src/libraries/System.Private.CoreLib/src/System/ReadOnlySpan.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/ReadOnlySpan.cs
@@ -120,7 +120,6 @@ namespace System
_length = 1;
}
-#pragma warning disable IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228
// Constructor for internal use only. It is not safe to expose publicly, and is instead exposed via the unsafe MemoryMarshal.CreateReadOnlySpan.
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal ReadOnlySpan(ref T reference, int length)
@@ -130,7 +129,6 @@ namespace System
_reference = ref reference;
_length = length;
}
-#pragma warning restore IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228
/// <summary>
/// Returns the specified element of the read-only span.
diff --git a/src/libraries/System.Private.CoreLib/src/System/Resources/ManifestBasedResourceGroveler.cs b/src/libraries/System.Private.CoreLib/src/System/Resources/ManifestBasedResourceGroveler.cs
index e4aa24dd7edd..7cfe087de94c 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Resources/ManifestBasedResourceGroveler.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Resources/ManifestBasedResourceGroveler.cs
@@ -269,6 +269,11 @@ namespace System.Resources
}
}
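+        // Probing helper: throwOnFileNotFound is false, so a missing satellite assembly comes
+        // back as null and the groveler can keep probing parent cultures.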
+ private static Assembly? InternalGetSatelliteAssembly(Assembly mainAssembly, CultureInfo culture, Version? version)
+ {
+ return RuntimeAssembly.InternalGetSatelliteAssembly(mainAssembly, culture, version, throwOnFileNotFound: false);
+ }
+
[RequiresUnreferencedCode("The CustomResourceTypesSupport feature switch has been enabled for this app which is being trimmed. " +
"Custom readers as well as custom objects on the resources file are not observable by the trimmer and so required assemblies, types and members may be removed.")]
private static ResourceSet InternalGetResourceSetFromSerializedData(Stream store, string readerTypeName, string? resSetTypeName, ResourceManager.ResourceManagerMediator mediator)
diff --git a/src/libraries/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.cs b/src/libraries/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.cs
index cf2135908514..38b923764e97 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.cs
@@ -50,7 +50,7 @@ namespace System.Runtime.CompilerServices
}
// In either case, the newly-allocated array is the exact same type as the
- // original incoming array. It's safe for us to Buffer.Memmove the contents
+ // original incoming array. It's safe for us to SpanHelpers.Memmove the contents
// from the source array to the destination array, otherwise the contents
// wouldn't have been valid for the source array in the first place.
@@ -125,9 +125,6 @@ namespace System.Runtime.CompilerServices
[Intrinsic]
internal static bool IsKnownConstant(char t) => false;
-
- [Intrinsic]
- internal static bool IsKnownConstant(int t) => false;
#pragma warning restore IDE0060
}
}
diff --git a/src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/MemoryMarshal.cs b/src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/MemoryMarshal.cs
index 2fc9946f0380..0b0da448e4eb 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/MemoryMarshal.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/MemoryMarshal.cs
@@ -87,7 +87,6 @@ namespace System.Runtime.InteropServices
/// </summary>
public static ref T GetReference<T>(ReadOnlySpan<T> span) => ref span._reference;
-#pragma warning disable IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228
/// <summary>
/// Returns a reference to the 0th element of the Span. If the Span is empty, returns a reference to fake non-null pointer. Such a reference can be used
/// for pinning but must never be dereferenced. This is useful for interop with methods that do not accept null pointers for zero-sized buffers.
@@ -101,7 +100,6 @@ namespace System.Runtime.InteropServices
/// </summary>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal static unsafe ref T GetNonNullPinnableReference<T>(ReadOnlySpan<T> span) => ref (span.Length != 0) ? ref Unsafe.AsRef(in span._reference) : ref Unsafe.AsRef<T>((void*)1);
-#pragma warning restore IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228
/// <summary>
/// Casts a Span of one primitive type <typeparamref name="TFrom"/> to another primitive type <typeparamref name="TTo"/>.
diff --git a/src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/NativeMemory.cs b/src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/NativeMemory.cs
index 069d67e5e462..7fb4af35480a 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/NativeMemory.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/NativeMemory.cs
@@ -61,7 +61,7 @@ namespace System.Runtime.InteropServices
[CLSCompliant(false)]
public static void Copy(void* source, void* destination, nuint byteCount)
{
- Buffer.Memmove(ref *(byte*)destination, ref *(byte*)source, byteCount);
+ SpanHelpers.Memmove(ref *(byte*)destination, ref *(byte*)source, byteCount);
}
/// <summary>
diff --git a/src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/SafeBuffer.cs b/src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/SafeBuffer.cs
index d35b5dd174fc..76858298feb2 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/SafeBuffer.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/SafeBuffer.cs
@@ -194,7 +194,7 @@ namespace System.Runtime.InteropServices
{
DangerousAddRef(ref mustCallRelease);
- Buffer.Memmove(ref Unsafe.As<T, byte>(ref value), ref *ptr, sizeofT);
+ SpanHelpers.Memmove(ref Unsafe.As<T, byte>(ref value), ref *ptr, sizeofT);
}
finally
{
@@ -281,7 +281,7 @@ namespace System.Runtime.InteropServices
{
DangerousAddRef(ref mustCallRelease);
- Buffer.Memmove(ref *ptr, ref Unsafe.As<T, byte>(ref value), sizeofT);
+ SpanHelpers.Memmove(ref *ptr, ref Unsafe.As<T, byte>(ref value), sizeofT);
}
finally
{
diff --git a/src/libraries/System.Private.CoreLib/src/System/SearchValues/IndexOfAnyAsciiSearcher.cs b/src/libraries/System.Private.CoreLib/src/System/SearchValues/IndexOfAnyAsciiSearcher.cs
index bc8b3fd0c6c8..deccdddb8fec 100644
--- a/src/libraries/System.Private.CoreLib/src/System/SearchValues/IndexOfAnyAsciiSearcher.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/SearchValues/IndexOfAnyAsciiSearcher.cs
@@ -10,7 +10,6 @@ using System.Runtime.Intrinsics.Wasm;
using System.Runtime.Intrinsics.X86;
#pragma warning disable 8500 // sizeof of managed types
-#pragma warning disable IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228
namespace System.Buffers
{
diff --git a/src/libraries/System.Private.CoreLib/src/System/SearchValues/ProbabilisticMap.cs b/src/libraries/System.Private.CoreLib/src/System/SearchValues/ProbabilisticMap.cs
index 150372914d8b..076340bebeba 100644
--- a/src/libraries/System.Private.CoreLib/src/System/SearchValues/ProbabilisticMap.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/SearchValues/ProbabilisticMap.cs
@@ -10,7 +10,6 @@ using System.Runtime.Intrinsics.Arm;
using System.Runtime.Intrinsics.Wasm;
using System.Runtime.Intrinsics.X86;
-#pragma warning disable IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228
namespace System.Buffers
{
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Security/DynamicSecurityMethodAttribute.cs b/src/libraries/System.Private.CoreLib/src/System/Security/DynamicSecurityMethodAttribute.cs
index e3dae854517e..e3dae854517e 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Security/DynamicSecurityMethodAttribute.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Security/DynamicSecurityMethodAttribute.cs
diff --git a/src/libraries/System.Private.CoreLib/src/System/Span.cs b/src/libraries/System.Private.CoreLib/src/System/Span.cs
index aaf3763d81b7..38b94e872b0c 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Span.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Span.cs
@@ -126,7 +126,6 @@ namespace System
_length = 1;
}
-#pragma warning disable IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228
// Constructor for internal use only. It is not safe to expose publicly, and is instead exposed via the unsafe MemoryMarshal.CreateSpan.
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal Span(ref T reference, int length)
@@ -136,7 +135,6 @@ namespace System
_reference = ref reference;
_length = length;
}
-#pragma warning restore IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228
/// <summary>
/// Returns a reference to specified element of the Span.
@@ -300,19 +298,7 @@ namespace System
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public unsafe void Fill(T value)
{
- if (sizeof(T) == 1)
- {
- // Special-case single-byte types like byte / sbyte / bool.
- // The runtime eventually calls memset, which can efficiently support large buffers.
- // We don't need to check IsReferenceOrContainsReferences because no references
- // can ever be stored in types this small.
- Unsafe.InitBlockUnaligned(ref Unsafe.As<T, byte>(ref _reference), *(byte*)&value, (uint)_length);
- }
- else
- {
- // Call our optimized workhorse method for all other types.
- SpanHelpers.Fill(ref _reference, (uint)_length, value);
- }
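+            // The sizeof(T) == 1 special case no longer lives here: SpanHelpers now owns the
+            // byte-level fill logic (see the new SpanHelpers.ByteMemOps.cs later in this diff)
+            // and Fill<T> itself is marked [Intrinsic].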
+ SpanHelpers.Fill(ref _reference, (uint)_length, value);
}
/// <summary>
diff --git a/src/libraries/System.Private.CoreLib/src/System/SpanHelpers.ByteMemOps.cs b/src/libraries/System.Private.CoreLib/src/System/SpanHelpers.ByteMemOps.cs
new file mode 100644
index 000000000000..542945920992
--- /dev/null
+++ b/src/libraries/System.Private.CoreLib/src/System/SpanHelpers.ByteMemOps.cs
@@ -0,0 +1,537 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#if TARGET_AMD64 || TARGET_ARM64 || (TARGET_32BIT && !TARGET_ARM) || TARGET_LOONGARCH64
+// JIT is guaranteed to unroll blocks up to 64 bytes in size
+#define HAS_CUSTOM_BLOCKS
+#endif
+
+using System.Diagnostics;
+using System.Numerics;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+
+namespace System
+{
+ internal static partial class SpanHelpers // .ByteMemOps
+ {
+#if TARGET_ARM64 || TARGET_LOONGARCH64
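+        // Never hand the copy off to the native memmove on these targets: managed code beat
+        // glibc's unoptimized memmove on Linux (dotnet/runtime#8897), and the optimal Windows
+        // cutoff is still undetermined (dotnet/runtime#8896); this rationale formerly lived in
+        // Buffer.Unix.cs and Buffer.Windows.cs, both deleted above.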
+ private const ulong MemmoveNativeThreshold = ulong.MaxValue;
+#elif TARGET_ARM
+ private const nuint MemmoveNativeThreshold = 512;
+#else
+ private const nuint MemmoveNativeThreshold = 2048;
+#endif
+ private const nuint ZeroMemoryNativeThreshold = 1024;
+
+#if HAS_CUSTOM_BLOCKS
+ [StructLayout(LayoutKind.Sequential, Size = 16)]
+ private struct Block16 {}
+
+ [StructLayout(LayoutKind.Sequential, Size = 64)]
+ private struct Block64 {}
+#endif // HAS_CUSTOM_BLOCKS
+
+#if NATIVEAOT
+ [System.Runtime.RuntimeExport("RhSpanHelpers_MemCopy")]
+#endif
+ [Intrinsic] // Unrolled for small constant lengths
+ internal static unsafe void Memmove(ref byte dest, ref byte src, nuint len)
+ {
+ // P/Invoke into the native version when the buffers are overlapping.
+ if ((nuint)Unsafe.ByteOffset(ref src, ref dest) < len ||
+ (nuint)Unsafe.ByteOffset(ref dest, ref src) < len)
+ {
+ goto BuffersOverlap;
+ }
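+            // The unsigned comparisons above detect overlap in either direction: a negative
+            // ByteOffset wraps around to a huge nuint, so each test passes only when one
+            // reference starts within 'len' bytes of the other (equal refs also land here).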
+
+ ref byte srcEnd = ref Unsafe.Add(ref src, len);
+ ref byte destEnd = ref Unsafe.Add(ref dest, len);
+
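+            // Dispatch: lengths of at most 16 bytes go to MCPY02..MCPY04, lengths over 64 to
+            // the MCPY05/MCPY06 block loop; everything in (16, 64] falls through to MCPY00.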
+ if (len <= 16)
+ goto MCPY02;
+ if (len > 64)
+ goto MCPY05;
+
+ MCPY00:
+ // Copy bytes which are multiples of 16 and leave the remainder for MCPY01 to handle.
+ Debug.Assert(len > 16 && len <= 64);
+#if HAS_CUSTOM_BLOCKS
+ Unsafe.As<byte, Block16>(ref dest) = Unsafe.As<byte, Block16>(ref src); // [0,16]
+#elif TARGET_64BIT
+ Unsafe.As<byte, long>(ref dest) = Unsafe.As<byte, long>(ref src);
+ Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 8)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 8)); // [0,16]
+#else
+ Unsafe.As<byte, int>(ref dest) = Unsafe.As<byte, int>(ref src);
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 4)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 4));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 8)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 8));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 12)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 12)); // [0,16]
+#endif
+ if (len <= 32)
+ goto MCPY01;
+#if HAS_CUSTOM_BLOCKS
+ Unsafe.As<byte, Block16>(ref Unsafe.Add(ref dest, 16)) = Unsafe.As<byte, Block16>(ref Unsafe.Add(ref src, 16)); // [0,32]
+#elif TARGET_64BIT
+ Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 16)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 16));
+ Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 24)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 24)); // [0,32]
+#else
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 16)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 16));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 20)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 20));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 24)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 24));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 28)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 28)); // [0,32]
+#endif
+ if (len <= 48)
+ goto MCPY01;
+#if HAS_CUSTOM_BLOCKS
+ Unsafe.As<byte, Block16>(ref Unsafe.Add(ref dest, 32)) = Unsafe.As<byte, Block16>(ref Unsafe.Add(ref src, 32)); // [0,48]
+#elif TARGET_64BIT
+ Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 32)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 32));
+ Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 40)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 40)); // [0,48]
+#else
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 32)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 32));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 36)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 36));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 40)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 40));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 44)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 44)); // [0,48]
+#endif
+
+ MCPY01:
+ // Unconditionally copy the last 16 bytes using destEnd and srcEnd and return.
+ Debug.Assert(len > 16 && len <= 64);
+#if HAS_CUSTOM_BLOCKS
+ Unsafe.As<byte, Block16>(ref Unsafe.Add(ref destEnd, -16)) = Unsafe.As<byte, Block16>(ref Unsafe.Add(ref srcEnd, -16));
+#elif TARGET_64BIT
+ Unsafe.As<byte, long>(ref Unsafe.Add(ref destEnd, -16)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref srcEnd, -16));
+ Unsafe.As<byte, long>(ref Unsafe.Add(ref destEnd, -8)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref srcEnd, -8));
+#else
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -16)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -16));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -12)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -12));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -8)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -8));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -4)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -4));
+#endif
+ return;
+
+ MCPY02:
+ // Copy the first 8 bytes and then unconditionally copy the last 8 bytes and return.
+ if ((len & 24) == 0)
+ goto MCPY03;
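+            // Reaching here means (len & 24) != 0, i.e. 8 <= len <= 16 on this path, so the
+            // two (possibly overlapping) 8-byte stores below cover the whole range.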
+ Debug.Assert(len >= 8 && len <= 16);
+#if TARGET_64BIT
+ Unsafe.As<byte, long>(ref dest) = Unsafe.As<byte, long>(ref src);
+ Unsafe.As<byte, long>(ref Unsafe.Add(ref destEnd, -8)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref srcEnd, -8));
+#else
+ Unsafe.As<byte, int>(ref dest) = Unsafe.As<byte, int>(ref src);
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 4)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 4));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -8)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -8));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -4)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -4));
+#endif
+ return;
+
+ MCPY03:
+ // Copy the first 4 bytes and then unconditionally copy the last 4 bytes and return.
+ if ((len & 4) == 0)
+ goto MCPY04;
+ Debug.Assert(len >= 4 && len < 8);
+ Unsafe.As<byte, int>(ref dest) = Unsafe.As<byte, int>(ref src);
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -4)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -4));
+ return;
+
+ MCPY04:
+            // Copy the first byte. For any remaining bytes, unconditionally copy the last 2 bytes and return.
+ Debug.Assert(len < 4);
+ if (len == 0)
+ return;
+ dest = src;
+ if ((len & 2) == 0)
+ return;
+ Unsafe.As<byte, short>(ref Unsafe.Add(ref destEnd, -2)) = Unsafe.As<byte, short>(ref Unsafe.Add(ref srcEnd, -2));
+ return;
+
+ MCPY05:
+ // PInvoke to the native version when the copy length exceeds the threshold.
+ if (len > MemmoveNativeThreshold)
+ {
+ goto PInvoke;
+ }
+
+#if HAS_CUSTOM_BLOCKS
+ if (len >= 256)
+ {
+ // Try to opportunistically align the destination below. The input isn't pinned, so the GC
+ // is free to move the references. We're therefore assuming that reads may still be unaligned.
+ //
+ // dest is more important to align than src because an unaligned store is more expensive
+ // than an unaligned load.
+ nuint misalignedElements = 64 - (nuint)Unsafe.AsPointer(ref dest) & 63;
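+                // Operator precedence makes this (64 - dest) & 63: the distance in bytes to
+                // the next 64-byte boundary, or 0 if dest is already aligned. The Block64
+                // store below covers that prefix before the refs are advanced past it.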
+ Unsafe.As<byte, Block64>(ref dest) = Unsafe.As<byte, Block64>(ref src);
+ src = ref Unsafe.Add(ref src, misalignedElements);
+ dest = ref Unsafe.Add(ref dest, misalignedElements);
+ len -= misalignedElements;
+ }
+#endif
+
+            // Copy 64 bytes at a time until the remainder is less than 64.
+ // If remainder is greater than 16 bytes, then jump to MCPY00. Otherwise, unconditionally copy the last 16 bytes and return.
+ Debug.Assert(len > 64 && len <= MemmoveNativeThreshold);
+ nuint n = len >> 6;
+
+ MCPY06:
+#if HAS_CUSTOM_BLOCKS
+ Unsafe.As<byte, Block64>(ref dest) = Unsafe.As<byte, Block64>(ref src);
+#elif TARGET_64BIT
+ Unsafe.As<byte, long>(ref dest) = Unsafe.As<byte, long>(ref src);
+ Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 8)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 8));
+ Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 16)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 16));
+ Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 24)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 24));
+ Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 32)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 32));
+ Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 40)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 40));
+ Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 48)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 48));
+ Unsafe.As<byte, long>(ref Unsafe.Add(ref dest, 56)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src, 56));
+#else
+ Unsafe.As<byte, int>(ref dest) = Unsafe.As<byte, int>(ref src);
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 4)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 4));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 8)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 8));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 12)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 12));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 16)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 16));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 20)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 20));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 24)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 24));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 28)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 28));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 32)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 32));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 36)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 36));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 40)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 40));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 44)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 44));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 48)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 48));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 52)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 52));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 56)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 56));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref dest, 60)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src, 60));
+#endif
+ dest = ref Unsafe.Add(ref dest, 64);
+ src = ref Unsafe.Add(ref src, 64);
+ n--;
+ if (n != 0)
+ goto MCPY06;
+
+ len %= 64;
+ if (len > 16)
+ goto MCPY00;
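+            // Here 0 <= len <= 16; the unconditional 16-byte store at destEnd - 16 finishes
+            // the tail (when len is 0 it merely rewrites already-copied bytes, which is fine).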
+#if HAS_CUSTOM_BLOCKS
+ Unsafe.As<byte, Block16>(ref Unsafe.Add(ref destEnd, -16)) = Unsafe.As<byte, Block16>(ref Unsafe.Add(ref srcEnd, -16));
+#elif TARGET_64BIT
+ Unsafe.As<byte, long>(ref Unsafe.Add(ref destEnd, -16)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref srcEnd, -16));
+ Unsafe.As<byte, long>(ref Unsafe.Add(ref destEnd, -8)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref srcEnd, -8));
+#else
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -16)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -16));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -12)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -12));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -8)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -8));
+ Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -4)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -4));
+#endif
+ return;
+
+ BuffersOverlap:
+ Debug.Assert(len > 0);
+ // If the buffers overlap perfectly, there's no point to copying the data.
+ if (Unsafe.AreSame(ref dest, ref src))
+ {
+                // Both could be null with a non-zero length; perform an implicit null check.
+ _ = Unsafe.ReadUnaligned<byte>(ref dest);
+ return;
+ }
+
+ PInvoke:
+            // Implicit null checks
+ Debug.Assert(len > 0);
+ _ = Unsafe.ReadUnaligned<byte>(ref dest);
+ _ = Unsafe.ReadUnaligned<byte>(ref src);
+ Buffer._Memmove(ref dest, ref src, len);
+ }
+
+#if NATIVEAOT
+ [System.Runtime.RuntimeExport("RhSpanHelpers_MemZero")]
+#endif
+ [Intrinsic] // Unrolled for small sizes
+ public static unsafe void ClearWithoutReferences(ref byte dest, nuint len)
+ {
+ if (len == 0)
+ return;
+
+ ref byte destEnd = ref Unsafe.Add(ref dest, len);
+
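+            // Dispatch mirrors Memmove: at most 16 bytes go to MZER02..MZER04, more than 64
+            // to the MZER05/MZER06 block loop; everything in (16, 64] falls through to MZER00.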
+ if (len <= 16)
+ goto MZER02;
+ if (len > 64)
+ goto MZER05;
+
+ MZER00:
+ // Clear bytes which are multiples of 16 and leave the remainder for MZER01 to handle.
+ Debug.Assert(len > 16 && len <= 64);
+#if HAS_CUSTOM_BLOCKS
+ Unsafe.WriteUnaligned<Block16>(ref dest, default);
+#elif TARGET_64BIT
+ Unsafe.WriteUnaligned<long>(ref dest, 0);
+ Unsafe.WriteUnaligned<long>(ref Unsafe.Add(ref dest, 8), 0);
+#else
+ Unsafe.WriteUnaligned<int>(ref dest, 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 4), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 8), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 12), 0);
+#endif
+ if (len <= 32)
+ goto MZER01;
+#if HAS_CUSTOM_BLOCKS
+ Unsafe.WriteUnaligned<Block16>(ref Unsafe.Add(ref dest, 16), default);
+#elif TARGET_64BIT
+ Unsafe.WriteUnaligned<long>(ref Unsafe.Add(ref dest, 16), 0);
+ Unsafe.WriteUnaligned<long>(ref Unsafe.Add(ref dest, 24), 0);
+#else
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 16), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 20), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 24), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 28), 0);
+#endif
+ if (len <= 48)
+ goto MZER01;
+#if HAS_CUSTOM_BLOCKS
+ Unsafe.WriteUnaligned<Block16>(ref Unsafe.Add(ref dest, 32), default);
+#elif TARGET_64BIT
+ Unsafe.WriteUnaligned<long>(ref Unsafe.Add(ref dest, 32), 0);
+ Unsafe.WriteUnaligned<long>(ref Unsafe.Add(ref dest, 40), 0);
+#else
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 32), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 36), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 40), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 44), 0);
+#endif
+
+ MZER01:
+ // Unconditionally clear the last 16 bytes using destEnd and return.
+ Debug.Assert(len > 16 && len <= 64);
+#if HAS_CUSTOM_BLOCKS
+ Unsafe.WriteUnaligned<Block16>(ref Unsafe.Add(ref destEnd, -16), default);
+#elif TARGET_64BIT
+ Unsafe.WriteUnaligned<long>(ref Unsafe.Add(ref destEnd, -16), 0);
+ Unsafe.WriteUnaligned<long>(ref Unsafe.Add(ref destEnd, -8), 0);
+#else
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref destEnd, -16), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref destEnd, -12), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref destEnd, -8), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref destEnd, -4), 0);
+#endif
+ return;
+
+ MZER02:
+ // Clear the first 8 bytes and then unconditionally clear the last 8 bytes and return.
+ if ((len & 24) == 0)
+ goto MZER03;
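+            // As in Memmove's MCPY02: (len & 24) != 0 here means 8 <= len <= 16.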
+ Debug.Assert(len >= 8 && len <= 16);
+#if TARGET_64BIT
+ Unsafe.WriteUnaligned<long>(ref dest, 0);
+ Unsafe.WriteUnaligned<long>(ref Unsafe.Add(ref destEnd, -8), 0);
+#else
+ Unsafe.WriteUnaligned<int>(ref dest, 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 4), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref destEnd, -8), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref destEnd, -4), 0);
+#endif
+ return;
+
+ MZER03:
+ // Clear the first 4 bytes and then unconditionally clear the last 4 bytes and return.
+ if ((len & 4) == 0)
+ goto MZER04;
+ Debug.Assert(len >= 4 && len < 8);
+ Unsafe.WriteUnaligned<int>(ref dest, 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref destEnd, -4), 0);
+ return;
+
+ MZER04:
+            // Clear the first byte. For any remaining bytes, unconditionally clear the last 2 bytes and return.
+ Debug.Assert(len < 4);
+ if (len == 0)
+ return;
+ dest = 0;
+ if ((len & 2) == 0)
+ return;
+ Unsafe.WriteUnaligned<short>(ref Unsafe.Add(ref destEnd, -2), 0);
+ return;
+
+ MZER05:
+ // PInvoke to the native version when the clear length exceeds the threshold.
+ if (len > ZeroMemoryNativeThreshold)
+ {
+ goto PInvoke;
+ }
+
+#if HAS_CUSTOM_BLOCKS
+ if (len >= 256)
+ {
+ // Try to opportunistically align the destination below. The input isn't pinned, so the GC
+ // is free to move the references. We're therefore assuming that reads may still be unaligned.
+ nuint misalignedElements = 64 - (nuint)Unsafe.AsPointer(ref dest) & 63;
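+                // Same computation as in Memmove: (64 - dest) & 63 bytes reach the next
+                // 64-byte boundary, and the Block64 store below covers that prefix.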
+ Unsafe.WriteUnaligned<Block64>(ref dest, default);
+ dest = ref Unsafe.Add(ref dest, misalignedElements);
+ len -= misalignedElements;
+ }
+#endif
+            // Clear 64 bytes at a time until the remainder is less than 64.
+ // If remainder is greater than 16 bytes, then jump to MZER00. Otherwise, unconditionally clear the last 16 bytes and return.
+ Debug.Assert(len > 64 && len <= ZeroMemoryNativeThreshold);
+ nuint n = len >> 6;
+
+ MZER06:
+#if HAS_CUSTOM_BLOCKS
+ Unsafe.WriteUnaligned<Block64>(ref dest, default);
+#elif TARGET_64BIT
+ Unsafe.WriteUnaligned<long>(ref dest, 0);
+ Unsafe.WriteUnaligned<long>(ref Unsafe.Add(ref dest, 8), 0);
+ Unsafe.WriteUnaligned<long>(ref Unsafe.Add(ref dest, 16), 0);
+ Unsafe.WriteUnaligned<long>(ref Unsafe.Add(ref dest, 24), 0);
+ Unsafe.WriteUnaligned<long>(ref Unsafe.Add(ref dest, 32), 0);
+ Unsafe.WriteUnaligned<long>(ref Unsafe.Add(ref dest, 40), 0);
+ Unsafe.WriteUnaligned<long>(ref Unsafe.Add(ref dest, 48), 0);
+ Unsafe.WriteUnaligned<long>(ref Unsafe.Add(ref dest, 56), 0);
+#else
+ Unsafe.WriteUnaligned<int>(ref dest, 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 4), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 8), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 12), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 16), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 20), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 24), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 28), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 32), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 36), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 40), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 44), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 48), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 52), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 56), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref dest, 60), 0);
+#endif
+ dest = ref Unsafe.Add(ref dest, 64);
+ n--;
+ if (n != 0)
+ goto MZER06;
+
+ len %= 64;
+ if (len > 16)
+ goto MZER00;
+#if HAS_CUSTOM_BLOCKS
+ Unsafe.WriteUnaligned<Block16>(ref Unsafe.Add(ref destEnd, -16), default);
+#elif TARGET_64BIT
+ Unsafe.WriteUnaligned<long>(ref Unsafe.Add(ref destEnd, -16), 0);
+ Unsafe.WriteUnaligned<long>(ref Unsafe.Add(ref destEnd, -8), 0);
+#else
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref destEnd, -16), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref destEnd, -12), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref destEnd, -8), 0);
+ Unsafe.WriteUnaligned<int>(ref Unsafe.Add(ref destEnd, -4), 0);
+#endif
+ return;
+
+ PInvoke:
+            // Implicit null check
+ _ = Unsafe.ReadUnaligned<byte>(ref dest);
+ Buffer._ZeroMemory(ref dest, len);
+ }
+
+#if NATIVEAOT
+ [System.Runtime.RuntimeExport("RhSpanHelpers_MemSet")]
+#endif
+ internal static void Fill(ref byte dest, byte value, nuint len)
+ {
+ if (!Vector.IsHardwareAccelerated)
+ {
+ goto CannotVectorize;
+ }
+
+ if (len >= (nuint)Vector<byte>.Count)
+ {
+ // We have enough data for at least one vectorized write.
+                Vector<byte> vector = new(value);
+ nuint stopLoopAtOffset = len & (nuint)(nint)(2 * (int)-Vector<byte>.Count); // intentional sign extension carries the negative bit
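+                // -2 * Vector<byte>.Count sign-extends into an all-ones high mask, so the
+                // AND rounds len down to an even multiple of the vector size.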
+ nuint offset = 0;
+
+ // Loop, writing 2 vectors at a time.
+                // Compare 'len' rather than 'stopLoopAtOffset' because we don't want a dependency
+                // on the very recently calculated 'stopLoopAtOffset' value.
+ if (len >= (uint)(2 * Vector<byte>.Count))
+ {
+ do
+ {
+ Unsafe.WriteUnaligned(ref Unsafe.AddByteOffset(ref dest, offset), vector);
+ Unsafe.WriteUnaligned(ref Unsafe.AddByteOffset(ref dest, offset + (nuint)Vector<byte>.Count), vector);
+ offset += (uint)(2 * Vector<byte>.Count);
+ } while (offset < stopLoopAtOffset);
+ }
+
+ // At this point, if any data remains to be written, it's strictly less than
+ // 2 * sizeof(Vector) bytes. The loop above had us write an even number of vectors.
+ // If the total byte length instead involves us writing an odd number of vectors, write
+ // one additional vector now. The bit check below tells us if we're in an "odd vector
+ // count" situation.
+ if ((len & (nuint)Vector<byte>.Count) != 0)
+ {
+ Unsafe.WriteUnaligned(ref Unsafe.AddByteOffset(ref dest, offset), vector);
+ }
+
+ // It's possible that some small buffer remains to be populated - something that won't
+ // fit an entire vector's worth of data. Instead of falling back to a loop, we'll write
+ // a vector at the very end of the buffer. This may involve overwriting previously
+ // populated data, which is fine since we're splatting the same value for all entries.
+ // There's no need to perform a length check here because we already performed this
+ // check before entering the vectorized code path.
+ Unsafe.WriteUnaligned(ref Unsafe.AddByteOffset(ref dest, len - (nuint)Vector<byte>.Count), vector);
+
+ // And we're done!
+ return;
+ }
+
+ CannotVectorize:
+
+            // If we reached this point, the fill could not be vectorized (no hardware
+            // acceleration), or there are too few bytes for us to vectorize. Fall back to an
+            // unrolled loop.
+ nuint i = 0;
+
+ // Write 8 elements at a time
+ if (len >= 8)
+ {
+ nuint stopLoopAtOffset = len & ~(nuint)7;
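+                // ~(nuint)7 clears the low three bits, rounding len down to a multiple of 8.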
+ do
+ {
+ Unsafe.Add(ref dest, (nint)i + 0) = value;
+ Unsafe.Add(ref dest, (nint)i + 1) = value;
+ Unsafe.Add(ref dest, (nint)i + 2) = value;
+ Unsafe.Add(ref dest, (nint)i + 3) = value;
+ Unsafe.Add(ref dest, (nint)i + 4) = value;
+ Unsafe.Add(ref dest, (nint)i + 5) = value;
+ Unsafe.Add(ref dest, (nint)i + 6) = value;
+ Unsafe.Add(ref dest, (nint)i + 7) = value;
+ } while ((i += 8) < stopLoopAtOffset);
+ }
+
+ // Write next 4 elements if needed
+ if ((len & 4) != 0)
+ {
+ Unsafe.Add(ref dest, (nint)i + 0) = value;
+ Unsafe.Add(ref dest, (nint)i + 1) = value;
+ Unsafe.Add(ref dest, (nint)i + 2) = value;
+ Unsafe.Add(ref dest, (nint)i + 3) = value;
+ i += 4;
+ }
+
+ // Write next 2 elements if needed
+ if ((len & 2) != 0)
+ {
+ Unsafe.Add(ref dest, (nint)i + 0) = value;
+ Unsafe.Add(ref dest, (nint)i + 1) = value;
+ i += 2;
+ }
+
+ // Write final element if needed
+ if ((len & 1) != 0)
+ {
+ Unsafe.Add(ref dest, (nint)i) = value;
+ }
+ }
+ }
+}
diff --git a/src/libraries/System.Private.CoreLib/src/System/SpanHelpers.Packed.cs b/src/libraries/System.Private.CoreLib/src/System/SpanHelpers.Packed.cs
index 69ce8c1f7fad..37f90c695090 100644
--- a/src/libraries/System.Private.CoreLib/src/System/SpanHelpers.Packed.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/SpanHelpers.Packed.cs
@@ -7,7 +7,6 @@ using System.Runtime.CompilerServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;
-#pragma warning disable IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228
#pragma warning disable 8500 // sizeof of managed types
diff --git a/src/libraries/System.Private.CoreLib/src/System/SpanHelpers.T.cs b/src/libraries/System.Private.CoreLib/src/System/SpanHelpers.T.cs
index da77d42320a9..ee378b7646b5 100644
--- a/src/libraries/System.Private.CoreLib/src/System/SpanHelpers.T.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/SpanHelpers.T.cs
@@ -8,7 +8,6 @@ using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
-#pragma warning disable IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228
#pragma warning disable 8500 // sizeof of managed types
@@ -16,6 +15,7 @@ namespace System
{
internal static partial class SpanHelpers // .T
{
+ [Intrinsic] // Unrolled for small sizes
public static unsafe void Fill<T>(ref T refData, nuint numElements, T value)
{
// Early checks to see if it's even possible to vectorize - JIT will turn these checks into consts.
diff --git a/src/libraries/System.Private.CoreLib/src/System/SpanHelpers.cs b/src/libraries/System.Private.CoreLib/src/System/SpanHelpers.cs
index a7e5f48d6318..b770e55cae4f 100644
--- a/src/libraries/System.Private.CoreLib/src/System/SpanHelpers.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/SpanHelpers.cs
@@ -12,327 +12,6 @@ namespace System
{
internal static partial class SpanHelpers
{
- public static unsafe void ClearWithoutReferences(ref byte b, nuint byteLength)
- {
- if (byteLength == 0)
- return;
-
-#if TARGET_AMD64 || TARGET_ARM64 || TARGET_LOONGARCH64
- // The exact matrix on when ZeroMemory is faster than InitBlockUnaligned is very complex. The factors to consider include
- // type of hardware and memory alignment. This threshold was chosen as a good balance across different configurations.
- if (byteLength > 768)
- goto PInvoke;
- Unsafe.InitBlockUnaligned(ref b, 0, (uint)byteLength);
- return;
-#else
- // TODO: Optimize other platforms to be on par with AMD64 CoreCLR
- // Note: It's important that this switch handles lengths at least up to 22.
- // See notes below near the main loop for why.
-
- // The switch will be very fast since it can be implemented using a jump
- // table in assembly. See http://stackoverflow.com/a/449297/4077294 for more info.
-
- switch (byteLength)
- {
- case 1:
- b = 0;
- return;
- case 2:
- Unsafe.As<byte, short>(ref b) = 0;
- return;
- case 3:
- Unsafe.As<byte, short>(ref b) = 0;
- Unsafe.Add(ref b, 2) = 0;
- return;
- case 4:
- Unsafe.As<byte, int>(ref b) = 0;
- return;
- case 5:
- Unsafe.As<byte, int>(ref b) = 0;
- Unsafe.Add(ref b, 4) = 0;
- return;
- case 6:
- Unsafe.As<byte, int>(ref b) = 0;
- Unsafe.As<byte, short>(ref Unsafe.Add(ref b, 4)) = 0;
- return;
- case 7:
- Unsafe.As<byte, int>(ref b) = 0;
- Unsafe.As<byte, short>(ref Unsafe.Add(ref b, 4)) = 0;
- Unsafe.Add(ref b, 6) = 0;
- return;
- case 8:
-#if TARGET_64BIT
- Unsafe.As<byte, long>(ref b) = 0;
-#else
- Unsafe.As<byte, int>(ref b) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
-#endif
- return;
- case 9:
-#if TARGET_64BIT
- Unsafe.As<byte, long>(ref b) = 0;
-#else
- Unsafe.As<byte, int>(ref b) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
-#endif
- Unsafe.Add(ref b, 8) = 0;
- return;
- case 10:
-#if TARGET_64BIT
- Unsafe.As<byte, long>(ref b) = 0;
-#else
- Unsafe.As<byte, int>(ref b) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
-#endif
- Unsafe.As<byte, short>(ref Unsafe.Add(ref b, 8)) = 0;
- return;
- case 11:
-#if TARGET_64BIT
- Unsafe.As<byte, long>(ref b) = 0;
-#else
- Unsafe.As<byte, int>(ref b) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
-#endif
- Unsafe.As<byte, short>(ref Unsafe.Add(ref b, 8)) = 0;
- Unsafe.Add(ref b, 10) = 0;
- return;
- case 12:
-#if TARGET_64BIT
- Unsafe.As<byte, long>(ref b) = 0;
-#else
- Unsafe.As<byte, int>(ref b) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
-#endif
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
- return;
- case 13:
-#if TARGET_64BIT
- Unsafe.As<byte, long>(ref b) = 0;
-#else
- Unsafe.As<byte, int>(ref b) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
-#endif
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
- Unsafe.Add(ref b, 12) = 0;
- return;
- case 14:
-#if TARGET_64BIT
- Unsafe.As<byte, long>(ref b) = 0;
-#else
- Unsafe.As<byte, int>(ref b) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
-#endif
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
- Unsafe.As<byte, short>(ref Unsafe.Add(ref b, 12)) = 0;
- return;
- case 15:
-#if TARGET_64BIT
- Unsafe.As<byte, long>(ref b) = 0;
-#else
- Unsafe.As<byte, int>(ref b) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
-#endif
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
- Unsafe.As<byte, short>(ref Unsafe.Add(ref b, 12)) = 0;
- Unsafe.Add(ref b, 14) = 0;
- return;
- case 16:
-#if TARGET_64BIT
- Unsafe.As<byte, long>(ref b) = 0;
- Unsafe.As<byte, long>(ref Unsafe.Add<byte>(ref b, 8)) = 0;
-#else
- Unsafe.As<byte, int>(ref b) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 12)) = 0;
-#endif
- return;
- case 17:
-#if TARGET_64BIT
- Unsafe.As<byte, long>(ref b) = 0;
- Unsafe.As<byte, long>(ref Unsafe.Add<byte>(ref b, 8)) = 0;
-#else
- Unsafe.As<byte, int>(ref b) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 12)) = 0;
-#endif
- Unsafe.Add(ref b, 16) = 0;
- return;
- case 18:
-#if TARGET_64BIT
- Unsafe.As<byte, long>(ref b) = 0;
- Unsafe.As<byte, long>(ref Unsafe.Add<byte>(ref b, 8)) = 0;
-#else
- Unsafe.As<byte, int>(ref b) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 12)) = 0;
-#endif
- Unsafe.As<byte, short>(ref Unsafe.Add(ref b, 16)) = 0;
- return;
- case 19:
-#if TARGET_64BIT
- Unsafe.As<byte, long>(ref b) = 0;
- Unsafe.As<byte, long>(ref Unsafe.Add<byte>(ref b, 8)) = 0;
-#else
- Unsafe.As<byte, int>(ref b) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 12)) = 0;
-#endif
- Unsafe.As<byte, short>(ref Unsafe.Add(ref b, 16)) = 0;
- Unsafe.Add(ref b, 18) = 0;
- return;
- case 20:
-#if TARGET_64BIT
- Unsafe.As<byte, long>(ref b) = 0;
- Unsafe.As<byte, long>(ref Unsafe.Add<byte>(ref b, 8)) = 0;
-#else
- Unsafe.As<byte, int>(ref b) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 12)) = 0;
-#endif
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 16)) = 0;
- return;
- case 21:
-#if TARGET_64BIT
- Unsafe.As<byte, long>(ref b) = 0;
- Unsafe.As<byte, long>(ref Unsafe.Add<byte>(ref b, 8)) = 0;
-#else
- Unsafe.As<byte, int>(ref b) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 12)) = 0;
-#endif
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 16)) = 0;
- Unsafe.Add(ref b, 20) = 0;
- return;
- case 22:
-#if TARGET_64BIT
- Unsafe.As<byte, long>(ref b) = 0;
- Unsafe.As<byte, long>(ref Unsafe.Add<byte>(ref b, 8)) = 0;
-#else
- Unsafe.As<byte, int>(ref b) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 12)) = 0;
-#endif
- Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 16)) = 0;
- Unsafe.As<byte, short>(ref Unsafe.Add(ref b, 20)) = 0;
- return;
- }
-
- // P/Invoke into the native version for large lengths
- if (byteLength >= 512) goto PInvoke;
-
- nuint i = 0; // byte offset at which we're copying
-
- if (((nuint)Unsafe.AsPointer(ref b) & 3) != 0)
- {
- if (((nuint)Unsafe.AsPointer(ref b) & 1) != 0)
- {
- b = 0;
- i += 1;
- if (((nuint)Unsafe.AsPointer(ref b) & 2) != 0)
- goto IntAligned;
- }
- Unsafe.As<byte, short>(ref Unsafe.AddByteOffset(ref b, i)) = 0;
- i += 2;
- }
-
- IntAligned:
-
- // On 64-bit IntPtr.Size == 8, so we want to advance to the next 8-aligned address. If
- // (int)b % 8 is 0, 5, 6, or 7, we will already have advanced by 0, 3, 2, or 1
- // bytes to the next aligned address (respectively), so do nothing. On the other hand,
- // if it is 1, 2, 3, or 4 we will want to copy-and-advance another 4 bytes until
- // we're aligned.
- // The thing 1, 2, 3, and 4 have in common that the others don't is that if you
- // subtract one from them, their 3rd lsb will not be set. Hence, the below check.
-
- if ((((nuint)Unsafe.AsPointer(ref b) - 1) & 4) == 0)
- {
- Unsafe.As<byte, int>(ref Unsafe.AddByteOffset(ref b, i)) = 0;
- i += 4;
- }
-
- nuint end = byteLength - 16;
- byteLength -= i; // lower 4 bits of byteLength represent how many bytes are left *after* the unrolled loop
-
- // We know due to the above switch-case that this loop will always run 1 iteration; max
- // bytes we clear before checking is 23 (7 to align the pointers, 16 for 1 iteration) so
- // the switch handles lengths 0-22.
- Debug.Assert(end >= 7 && i <= end);
-
- // This is separated out into a different variable, so the i + 16 addition can be
- // performed at the start of the pipeline and the loop condition does not have
- // a dependency on the writes.
- nuint counter;
-
- do
- {
- counter = i + 16;
-
- // This loop looks very costly since there appear to be a bunch of temporary values
- // being created with the adds, but the jit (for x86 anyways) will convert each of
- // these to use memory addressing operands.
-
- // So the only cost is a bit of code size, which is made up for by the fact that
- // we save on writes to b.
-
-#if TARGET_64BIT
- Unsafe.As<byte, long>(ref Unsafe.AddByteOffset<byte>(ref b, i)) = 0;
- Unsafe.As<byte, long>(ref Unsafe.AddByteOffset<byte>(ref b, i + 8)) = 0;
-#else
- Unsafe.As<byte, int>(ref Unsafe.AddByteOffset(ref b, i)) = 0;
- Unsafe.As<byte, int>(ref Unsafe.AddByteOffset(ref b, i + 4)) = 0;
- Unsafe.As<byte, int>(ref Unsafe.AddByteOffset(ref b, i + 8)) = 0;
- Unsafe.As<byte, int>(ref Unsafe.AddByteOffset(ref b, i + 12)) = 0;
-#endif
-
- i = counter;
-
- // See notes above for why this wasn't used instead
- // i += 16;
- }
- while (counter <= end);
-
- if ((byteLength & 8) != 0)
- {
-#if TARGET_64BIT
- Unsafe.As<byte, long>(ref Unsafe.AddByteOffset<byte>(ref b, i)) = 0;
-#else
- Unsafe.As<byte, int>(ref Unsafe.AddByteOffset(ref b, i)) = 0;
- Unsafe.As<byte, int>(ref Unsafe.AddByteOffset(ref b, i + 4)) = 0;
-#endif
- i += 8;
- }
- if ((byteLength & 4) != 0)
- {
- Unsafe.As<byte, int>(ref Unsafe.AddByteOffset(ref b, i)) = 0;
- i += 4;
- }
- if ((byteLength & 2) != 0)
- {
- Unsafe.As<byte, short>(ref Unsafe.AddByteOffset(ref b, i)) = 0;
- i += 2;
- }
- if ((byteLength & 1) != 0)
- {
- Unsafe.AddByteOffset(ref b, i) = 0;
- // We're not using i after this, so not needed
- // i += 1;
- }
-
- return;
-#endif
-
- PInvoke:
- Buffer._ZeroMemory(ref b, byteLength);
- }
-
public static unsafe void ClearWithReferences(ref IntPtr ip, nuint pointerSizeLength)
{
Debug.Assert((int)Unsafe.AsPointer(ref ip) % sizeof(IntPtr) == 0, "Should've been aligned on natural word boundary.");
@@ -650,7 +329,6 @@ namespace System
ReverseInner(ref elements, length);
}
-#pragma warning disable IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void ReverseInner<T>(ref T elements, nuint length)
{
@@ -667,6 +345,5 @@ namespace System
last = ref Unsafe.Subtract(ref last, 1);
} while (Unsafe.IsAddressLessThan(ref first, ref last));
}
-#pragma warning restore IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228
}
}
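
The clearing path removed above combines three ideas: a jump table for lengths 0-22, an alignment prologue so the main loop can use aligned stores, and a tail that derives the final 0-15 bytes of writes from the low bits of the remaining length. A minimal sketch of that shape, with hypothetical names, using unaligned writes in place of the alignment prologue:

    using System;
    using System.Runtime.CompilerServices;

    static class ClearSketch
    {
        // Sketch only (hypothetical type/method names): zero 16 bytes per
        // iteration, then let the low bits of the remainder pick the tail
        // writes, mirroring the (byteLength & 8/4/2/1) checks in the removed code.
        public static void Clear(ref byte b, nuint byteLength)
        {
            nuint i = 0;
            while (i + 16 <= byteLength)
            {
                Unsafe.WriteUnaligned<ulong>(ref Unsafe.AddByteOffset(ref b, i), 0);
                Unsafe.WriteUnaligned<ulong>(ref Unsafe.AddByteOffset(ref b, i + 8), 0);
                i += 16;
            }

            nuint rem = byteLength - i;
            if ((rem & 8) != 0) { Unsafe.WriteUnaligned<ulong>(ref Unsafe.AddByteOffset(ref b, i), 0); i += 8; }
            if ((rem & 4) != 0) { Unsafe.WriteUnaligned<uint>(ref Unsafe.AddByteOffset(ref b, i), 0); i += 4; }
            if ((rem & 2) != 0) { Unsafe.WriteUnaligned<ushort>(ref Unsafe.AddByteOffset(ref b, i), 0); i += 2; }
            if ((rem & 1) != 0) { Unsafe.AddByteOffset(ref b, i) = 0; }
        }
    }
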
diff --git a/src/libraries/System.Private.CoreLib/src/System/Threading/Lock.cs b/src/libraries/System.Private.CoreLib/src/System/Threading/Lock.cs
index 66cf6a03f607..056ff96d41b1 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Threading/Lock.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Threading/Lock.cs
@@ -38,25 +38,9 @@ namespace System.Threading
private uint _state; // see State for layout
private uint _recursionCount;
private short _spinCount;
-
- // The lowest bit is a flag, when set it indicates that the lock should use trivial waits
- private ushort _waiterStartTimeMsAndFlags;
-
+ private ushort _waiterStartTimeMs;
private AutoResetEvent? _waitEvent;
-#if NATIVEAOT // The method needs to be public in NativeAOT so that other private libraries can access it
- public Lock(bool useTrivialWaits)
-#else
- internal Lock(bool useTrivialWaits)
-#endif
- : this()
- {
- if (useTrivialWaits)
- {
- _waiterStartTimeMsAndFlags = 1;
- }
- }
-
/// <summary>
/// Enters the lock. Once the method returns, the calling thread would be the only thread that holds the lock.
/// </summary>
@@ -460,9 +444,9 @@ namespace System.Threading
Wait:
bool areContentionEventsEnabled =
- NativeRuntimeEventSource.Log.IsEnabled(
+ NativeRuntimeEventSource.Log?.IsEnabled(
EventLevel.Informational,
- NativeRuntimeEventSource.Keywords.ContentionKeyword);
+ NativeRuntimeEventSource.Keywords.ContentionKeyword) ?? false;
AutoResetEvent waitEvent = _waitEvent ?? CreateWaitEvent(areContentionEventsEnabled);
if (State.TryLockBeforeWait(this))
{
@@ -479,7 +463,7 @@ namespace System.Threading
long waitStartTimeTicks = 0;
if (areContentionEventsEnabled)
{
- NativeRuntimeEventSource.Log.ContentionStart(this);
+ NativeRuntimeEventSource.Log!.ContentionStart(this);
waitStartTimeTicks = Stopwatch.GetTimestamp();
}
@@ -488,7 +472,7 @@ namespace System.Threading
int remainingTimeoutMs = timeoutMs;
while (true)
{
- if (!waitEvent.WaitOneNoCheck(remainingTimeoutMs, UseTrivialWaits))
+ if (!waitEvent.WaitOne(remainingTimeoutMs))
{
break;
}
@@ -551,7 +535,7 @@ namespace System.Threading
{
double waitDurationNs =
(Stopwatch.GetTimestamp() - waitStartTimeTicks) * 1_000_000_000.0 / Stopwatch.Frequency;
- NativeRuntimeEventSource.Log.ContentionStop(waitDurationNs);
+ NativeRuntimeEventSource.Log!.ContentionStop(waitDurationNs);
}
return currentThreadId;
@@ -567,19 +551,7 @@ namespace System.Threading
return new ThreadId(0);
}
- // Trivial waits are:
- // - Not interruptible by Thread.Interrupt
- // - Don't allow reentrance through APCs or message pumping
- // - Not forwarded to SynchronizationContext wait overrides
- private bool UseTrivialWaits => (_waiterStartTimeMsAndFlags & 1) != 0;
-
- private ushort WaiterStartTimeMs
- {
- get => (ushort)(_waiterStartTimeMsAndFlags >> 1);
- set => _waiterStartTimeMsAndFlags = (ushort)((value << 1) | (_waiterStartTimeMsAndFlags & 1));
- }
-
- private void ResetWaiterStartTime() => WaiterStartTimeMs = 0;
+ private void ResetWaiterStartTime() => _waiterStartTimeMs = 0;
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void RecordWaiterStartTime()
@@ -590,7 +562,7 @@ namespace System.Threading
// Don't record zero, that value is reserved for indicating that a time is not recorded
currentTimeMs--;
}
- WaiterStartTimeMs = currentTimeMs;
+ _waiterStartTimeMs = currentTimeMs;
}
private bool ShouldStopPreemptingWaiters
@@ -599,7 +571,7 @@ namespace System.Threading
get
{
// If the recorded time is zero, a time has not been recorded yet
- ushort waiterStartTimeMs = WaiterStartTimeMs;
+ ushort waiterStartTimeMs = _waiterStartTimeMs;
return
waiterStartTimeMs != 0 &&
(ushort)Environment.TickCount - waiterStartTimeMs >= MaxDurationMsForPreemptingWaiters;
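
The removed Lock members above show a compact bit-packing pattern: a boolean flag lives in bit 0 of a ushort and a 15-bit start time lives in bits 1-15, so one field serves both. A standalone sketch of the same packing (hypothetical type name):

    struct FlagAndValue
    {
        // Bit 0: flag; bits 1-15: value, as in the removed _waiterStartTimeMsAndFlags.
        private ushort _bits;

        public bool Flag => (_bits & 1) != 0;

        public ushort Value
        {
            get => (ushort)(_bits >> 1);
            // Replace the value bits but preserve the flag bit; the value is
            // truncated to 15 bits by the shift.
            set => _bits = (ushort)((value << 1) | (_bits & 1));
        }
    }
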
diff --git a/src/libraries/System.Private.CoreLib/src/System/Threading/WaitHandle.Unix.cs b/src/libraries/System.Private.CoreLib/src/System/Threading/WaitHandle.Unix.cs
index 96253742b031..3ba1cb132e27 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Threading/WaitHandle.Unix.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Threading/WaitHandle.Unix.cs
@@ -7,8 +7,8 @@ namespace System.Threading
{
public abstract partial class WaitHandle
{
- private static int WaitOneCore(IntPtr handle, int millisecondsTimeout, bool useTrivialWaits) =>
- WaitSubsystem.Wait(handle, millisecondsTimeout, interruptible: !useTrivialWaits);
+ private static int WaitOneCore(IntPtr handle, int millisecondsTimeout) =>
+ WaitSubsystem.Wait(handle, millisecondsTimeout, true);
private static int WaitMultipleIgnoringSyncContextCore(Span<IntPtr> handles, bool waitAll, int millisecondsTimeout) =>
WaitSubsystem.Wait(handles, waitAll, millisecondsTimeout);
diff --git a/src/libraries/System.Private.CoreLib/src/System/Threading/WaitHandle.Windows.cs b/src/libraries/System.Private.CoreLib/src/System/Threading/WaitHandle.Windows.cs
index 42827b552565..bae0a8f3a23c 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Threading/WaitHandle.Windows.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Threading/WaitHandle.Windows.cs
@@ -14,11 +14,11 @@ namespace System.Threading
{
fixed (IntPtr* pHandles = &MemoryMarshal.GetReference(handles))
{
- return WaitForMultipleObjectsIgnoringSyncContext(pHandles, handles.Length, waitAll, millisecondsTimeout, useTrivialWaits: false);
+ return WaitForMultipleObjectsIgnoringSyncContext(pHandles, handles.Length, waitAll, millisecondsTimeout);
}
}
- private static unsafe int WaitForMultipleObjectsIgnoringSyncContext(IntPtr* pHandles, int numHandles, bool waitAll, int millisecondsTimeout, bool useTrivialWaits)
+ private static unsafe int WaitForMultipleObjectsIgnoringSyncContext(IntPtr* pHandles, int numHandles, bool waitAll, int millisecondsTimeout)
{
Debug.Assert(millisecondsTimeout >= -1);
@@ -27,8 +27,7 @@ namespace System.Threading
waitAll = false;
#if NATIVEAOT // TODO: reentrant wait support https://github.com/dotnet/runtime/issues/49518
- // Trivial waits don't allow reentrance
- bool reentrantWait = !useTrivialWaits && Thread.ReentrantWaitsEnabled;
+ bool reentrantWait = Thread.ReentrantWaitsEnabled;
if (reentrantWait)
{
@@ -93,9 +92,9 @@ namespace System.Threading
return result;
}
- internal static unsafe int WaitOneCore(IntPtr handle, int millisecondsTimeout, bool useTrivialWaits)
+ internal static unsafe int WaitOneCore(IntPtr handle, int millisecondsTimeout)
{
- return WaitForMultipleObjectsIgnoringSyncContext(&handle, 1, false, millisecondsTimeout, useTrivialWaits);
+ return WaitForMultipleObjectsIgnoringSyncContext(&handle, 1, false, millisecondsTimeout);
}
private static int SignalAndWaitCore(IntPtr handleToSignal, IntPtr handleToWaitOn, int millisecondsTimeout)
diff --git a/src/libraries/System.Private.CoreLib/src/System/Threading/WaitHandle.cs b/src/libraries/System.Private.CoreLib/src/System/Threading/WaitHandle.cs
index 21920bc39b75..58b5d8341414 100644
--- a/src/libraries/System.Private.CoreLib/src/System/Threading/WaitHandle.cs
+++ b/src/libraries/System.Private.CoreLib/src/System/Threading/WaitHandle.cs
@@ -106,7 +106,6 @@ namespace System.Threading
internal bool WaitOneNoCheck(
int millisecondsTimeout,
- bool useTrivialWaits = false,
object? associatedObject = null,
NativeRuntimeEventSource.WaitHandleWaitSourceMap waitSource = NativeRuntimeEventSource.WaitHandleWaitSourceMap.Unknown)
{
@@ -123,26 +122,22 @@ namespace System.Threading
waitHandle.DangerousAddRef(ref success);
int waitResult = WaitFailed;
-
- // Check if the wait should be forwarded to a SynchronizationContext wait override. Trivial waits don't allow
- // reentrance or interruption, and are not forwarded.
- bool usedSyncContextWait = false;
- if (!useTrivialWaits)
+ SynchronizationContext? context = SynchronizationContext.Current;
+ if (context != null && context.IsWaitNotificationRequired())
{
- SynchronizationContext? context = SynchronizationContext.Current;
- if (context != null && context.IsWaitNotificationRequired())
- {
- usedSyncContextWait = true;
- waitResult = context.Wait(new[] { waitHandle.DangerousGetHandle() }, false, millisecondsTimeout);
- }
+ waitResult = context.Wait(new[] { waitHandle.DangerousGetHandle() }, false, millisecondsTimeout);
}
-
- if (!usedSyncContextWait)
+ else
{
#if !CORECLR // CoreCLR sends the wait events from the native side
bool sendWaitEvents =
millisecondsTimeout != 0 &&
- !useTrivialWaits &&
+#if NATIVEAOT
+ // A null check is necessary in NativeAOT due to the possibility of reentrance during class
+ // construction, as this path can be reached through Lock. See
+ // https://github.com/dotnet/runtime/issues/94728 for a call stack.
+ NativeRuntimeEventSource.Log != null &&
+#endif
NativeRuntimeEventSource.Log.IsEnabled(
EventLevel.Verbose,
NativeRuntimeEventSource.Keywords.WaitHandleKeyword);
@@ -154,7 +149,7 @@ namespace System.Threading
waitSource != NativeRuntimeEventSource.WaitHandleWaitSourceMap.MonitorWait;
if (tryNonblockingWaitFirst)
{
- waitResult = WaitOneCore(waitHandle.DangerousGetHandle(), 0 /* millisecondsTimeout */, useTrivialWaits);
+ waitResult = WaitOneCore(waitHandle.DangerousGetHandle(), millisecondsTimeout: 0);
if (waitResult == WaitTimeout)
{
// Do a full wait and send the wait events
@@ -176,7 +171,7 @@ namespace System.Threading
if (!tryNonblockingWaitFirst)
#endif
{
- waitResult = WaitOneCore(waitHandle.DangerousGetHandle(), millisecondsTimeout, useTrivialWaits);
+ waitResult = WaitOneCore(waitHandle.DangerousGetHandle(), millisecondsTimeout);
}
#if !CORECLR // CoreCLR sends the wait events from the native side
diff --git a/src/libraries/System.Private.Xml/src/System/Xml/Schema/XsdDateTime.cs b/src/libraries/System.Private.Xml/src/System/Xml/Schema/XsdDateTime.cs
index 10b78d608233..7e75cc55afad 100644
--- a/src/libraries/System.Private.Xml/src/System/Xml/Schema/XsdDateTime.cs
+++ b/src/libraries/System.Private.Xml/src/System/Xml/Schema/XsdDateTime.cs
@@ -396,6 +396,7 @@ namespace System.Xml.Schema
{
case DateTimeTypeCode.GMonth:
case DateTimeTypeCode.GDay:
+ // codeql[cs/leap-year/unsafe-date-construction-from-two-elements] - The XML specification does not explicitly define this behavior for parsing in a non-leap year. We intentionally throw here. Altering this behavior to be more resilient, producing dates like 2/28 or 3/1, could introduce unintended consequences and may not be desirable for users.
result = new DateTime(DateTime.Now.Year, xdt.Month, xdt.Day);
break;
case DateTimeTypeCode.Time:
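
The suppression comment above concerns building a DateTime from only a month/day pair by borrowing the current year: for a gDay/gMonth value like February 29, the constructor throws whenever the borrowed year is not a leap year, which is the intentional behavior being annotated. A small illustration of the hazard:

    using System;

    Console.WriteLine(DateTime.IsLeapYear(2024)); // True: new DateTime(2024, 2, 29) is valid
    try
    {
        _ = new DateTime(2023, 2, 29); // 2023 is not a leap year
    }
    catch (ArgumentOutOfRangeException)
    {
        Console.WriteLine("Feb 29 does not exist in 2023.");
    }
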
diff --git a/src/libraries/System.Runtime.Numerics/src/System/Number.BigInteger.cs b/src/libraries/System.Runtime.Numerics/src/System/Number.BigInteger.cs
index a5fc0e6f1f1d..01db0ef3777e 100644
--- a/src/libraries/System.Runtime.Numerics/src/System/Number.BigInteger.cs
+++ b/src/libraries/System.Runtime.Numerics/src/System/Number.BigInteger.cs
@@ -445,7 +445,7 @@ namespace System
int leadingBitsCount = value.Length % TParser.DigitsPerBlock;
uint leading = signBits;
- // First parse unanligned leading block if exists.
+ // First parse the unaligned leading block, if it exists.
if (leadingBitsCount != 0)
{
if (!TParser.TryParseUnalignedBlock(value[0..leadingBitsCount], out leading))
@@ -484,8 +484,8 @@ namespace System
// Require to store in _bits.
// Positive: sign=1, bits=[leading]
- // Negative: sign=-1, bits=[leading ^ -1 + 1]=[-leading]
- result = new BigInteger((int)signBits | 1, [leading ^ signBits - signBits]);
+ // Negative: sign=-1, bits=[(leading ^ -1) + 1]=[-leading]
+ result = new BigInteger((int)signBits | 1, [(leading ^ signBits) - signBits]);
return ParsingStatus.OK;
}
else
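
The corrected comment and parentheses rely on a standard two's-complement identity: when signBits is all ones, (leading ^ signBits) - signBits equals (~leading) + 1, i.e. -leading, and when signBits is zero it leaves leading unchanged. A quick check of both cases:

    using System;

    uint x = 0xF0000001;

    uint sign0 = 0;          // positive case: identity
    Console.WriteLine((x ^ sign0) - sign0 == x); // True

    uint sign1 = 0xFFFFFFFF; // negative case: (~x) + 1, two's-complement negation
    uint negated = unchecked((x ^ sign1) - sign1);
    Console.WriteLine(negated == unchecked(~x + 1)); // True (0x0FFFFFFF here)
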
diff --git a/src/libraries/System.Runtime.Numerics/tests/BigInteger/parse.cs b/src/libraries/System.Runtime.Numerics/tests/BigInteger/parse.cs
index 86652ef6550a..9852ea93bf1a 100644
--- a/src/libraries/System.Runtime.Numerics/tests/BigInteger/parse.cs
+++ b/src/libraries/System.Runtime.Numerics/tests/BigInteger/parse.cs
@@ -143,6 +143,18 @@ namespace System.Numerics.Tests
Assert.True(BigInteger.TryParse("080000001", NumberStyles.HexNumber, null, out result));
Assert.Equal(0x80000001u, result);
+ Assert.True(BigInteger.TryParse("F0000001", NumberStyles.HexNumber, null, out result));
+ Assert.Equal(-0xFFFFFFFL, result);
+
+ Assert.True(BigInteger.TryParse("0F0000001", NumberStyles.HexNumber, null, out result));
+ Assert.Equal(0xF0000001u, result);
+
+ Assert.True(BigInteger.TryParse("F00000001", NumberStyles.HexNumber, null, out result));
+ Assert.Equal(-0xFFFFFFFFL, result);
+
+ Assert.True(BigInteger.TryParse("0F00000001", NumberStyles.HexNumber, null, out result));
+ Assert.Equal(0xF00000001u, result);
+
// Regression test for: https://github.com/dotnet/runtime/issues/74758
Assert.True(BigInteger.TryParse("FFFFFFFFE", NumberStyles.HexNumber, null, out result));
Assert.Equal(new BigInteger(-2), result);
@@ -175,6 +187,7 @@ namespace System.Numerics.Tests
[InlineData("10000000000000000000000000000000", (long)int.MinValue)]
[InlineData("010000000000000000000000000000001", 0x080000001L)]
[InlineData("111111111111111111111111111111110", -2L)]
+ [InlineData("100000000000000000000000000000001", -0xFFFFFFFFL)]
[InlineData("0111111111111111111111111111111111", 0x1FFFFFFFFL)]
public void Parse_BinSpecialCases(string input, long expectedValue)
{
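
These new cases pin down the sign-extension rule for hex and binary parsing: the most significant digit of the string decides the sign, so "F0000001" (leading nibble >= 8) parses as a negative value, while "0F0000001", with one extra leading zero, parses as the positive value with the same magnitude bits. For example:

    using System;
    using System.Globalization;
    using System.Numerics;

    // A leading hex digit >= 8 means the value is read as negative two's complement.
    BigInteger neg = BigInteger.Parse("F0000001", NumberStyles.HexNumber);  // -0xFFFFFFF
    BigInteger pos = BigInteger.Parse("0F0000001", NumberStyles.HexNumber); //  0xF0000001

    Console.WriteLine(neg); // -268435455
    Console.WriteLine(pos); // 4026531841
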
diff --git a/src/libraries/System.Runtime/tests/System.Runtime.CompilerServices.Unsafe.Tests/UnsafeTests.cs b/src/libraries/System.Runtime/tests/System.Runtime.CompilerServices.Unsafe.Tests/UnsafeTests.cs
index 13488161781d..6eb527f92325 100644
--- a/src/libraries/System.Runtime/tests/System.Runtime.CompilerServices.Unsafe.Tests/UnsafeTests.cs
+++ b/src/libraries/System.Runtime/tests/System.Runtime.CompilerServices.Unsafe.Tests/UnsafeTests.cs
@@ -462,6 +462,26 @@ namespace System.Runtime.CompilerServices
Assert.Equal(new IntPtr(-3), Unsafe.ByteOffset(ref byte4.B3, ref byte4.B0));
}
+ private static unsafe class StaticReadonlyHolder
+ {
+ public static readonly void* Pointer = (void*)RuntimeHelpers.AllocateTypeAssociatedMemory(typeof(StaticReadonlyHolder), 1);
+ }
+
+ [Fact]
+ public static unsafe void ByteOffsetConstantRef()
+ {
+ // https://github.com/dotnet/runtime/pull/99019
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static nint NullTest(ref byte origin) => Unsafe.ByteOffset(ref origin, ref Unsafe.NullRef<byte>());
+ Assert.Equal(0, NullTest(ref Unsafe.NullRef<byte>()));
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ static ref byte GetStatic(ref byte x) => ref x;
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static nint StaticReadonlyTest(ref byte x) => Unsafe.ByteOffset(ref GetStatic(ref Unsafe.AsRef<byte>(StaticReadonlyHolder.Pointer)), ref x);
+ Assert.Equal(0, StaticReadonlyTest(ref Unsafe.AsRef<byte>(StaticReadonlyHolder.Pointer)));
+ }
+
[Fact]
public static unsafe void AsRef()
{
@@ -597,7 +617,7 @@ namespace System.Runtime.CompilerServices
}
[Fact]
- public static void RefSubtract()
+ public static unsafe void RefSubtract()
{
string[] a = new string[] { "abc", "def", "ghi", "jkl" };
@@ -609,6 +629,11 @@ namespace System.Runtime.CompilerServices
ref string r3 = ref Unsafe.Subtract(ref r2, 3);
Assert.Equal("abc", r3);
+
+ // https://github.com/dotnet/runtime/pull/99019
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static ref byte NullTest(nuint offset) => ref Unsafe.Subtract(ref Unsafe.NullRef<byte>(), offset);
+ Assert.True(Unsafe.IsNullRef(ref NullTest(0)));
}
[Fact]
diff --git a/src/libraries/System.Runtime/tests/System.Runtime.Tests/System/Runtime/CompilerServices/RuntimeHelpersTests.cs b/src/libraries/System.Runtime/tests/System.Runtime.Tests/System/Runtime/CompilerServices/RuntimeHelpersTests.cs
index 7359e11283f7..5148ca0c488d 100644
--- a/src/libraries/System.Runtime/tests/System.Runtime.Tests/System/Runtime/CompilerServices/RuntimeHelpersTests.cs
+++ b/src/libraries/System.Runtime/tests/System.Runtime.Tests/System/Runtime/CompilerServices/RuntimeHelpersTests.cs
@@ -69,6 +69,26 @@ namespace System.Runtime.CompilerServices.Tests
}
[Fact]
+ public static void EqualsTest()
+ {
+ // Boolean RuntimeHelpers.Equals(Object, Object)
+
+ Assert.True(RuntimeHelpers.Equals(Guid.Empty, Guid.Empty));
+ Assert.False(RuntimeHelpers.Equals(Guid.Empty, Guid.NewGuid()));
+
+ // Reference equal
+ object o = new object();
+ Assert.True(RuntimeHelpers.Equals(o, o));
+
+ // Type mismatch
+ Assert.False(RuntimeHelpers.Equals(Guid.Empty, string.Empty));
+
+ // Non value types
+ Assert.False(RuntimeHelpers.Equals(new object(), new object()));
+ Assert.False(RuntimeHelpers.Equals(new int[] { 1, 2, 3 }, new int[] { 1, 2, 3 }));
+ }
+
+ [Fact]
public static void InitializeArray()
{
// Void RuntimeHelpers.InitializeArray(Array, RuntimeFieldHandle)
@@ -374,7 +394,6 @@ namespace System.Runtime.CompilerServices.Tests
}
[Fact]
- [SkipOnMono("Not presently implemented on Mono")]
public static void AllocateTypeAssociatedMemoryInvalidArguments()
{
Assert.Throws<ArgumentException>(() => { RuntimeHelpers.AllocateTypeAssociatedMemory(null, 10); });
@@ -382,7 +401,6 @@ namespace System.Runtime.CompilerServices.Tests
}
[Fact]
- [SkipOnMono("Not presently implemented on Mono")]
public static unsafe void AllocateTypeAssociatedMemoryValidArguments()
{
IntPtr memory = RuntimeHelpers.AllocateTypeAssociatedMemory(typeof(RuntimeHelpersTests), 32);
diff --git a/src/libraries/System.Runtime/tests/System.Runtime.Tests/System/Type/TypeTests.cs b/src/libraries/System.Runtime/tests/System.Runtime.Tests/System/Type/TypeTests.cs
index 13fac33e4eb3..ab1d6c66c127 100644
--- a/src/libraries/System.Runtime/tests/System.Runtime.Tests/System/Type/TypeTests.cs
+++ b/src/libraries/System.Runtime/tests/System.Runtime.Tests/System/Type/TypeTests.cs
@@ -1188,9 +1188,6 @@ namespace System.Tests
}
else
{
- // [ActiveIssue("https://github.com/dotnet/runtime/issues/90863")]
- if (classType.Type == typeof(SIMs.C2Implicit<string>) && interfaceType.Type == typeof(SIMs.I1<string>)) continue;
-
// It's implemented implicitly by the level 2 interface
MTarget = interfaceType.Level2InterfaceType.GetMethod(interfaceType.MethodNamePrefix + "M", bindingFlags);
GTarget = interfaceType.Level2InterfaceType.GetMethod(interfaceType.MethodNamePrefix + "G", bindingFlags);
@@ -1315,7 +1312,7 @@ namespace System.Tests
static class DIMs
{
-
+
internal interface I1
{
void M() { throw new Exception("e"); }
diff --git a/src/libraries/System.Runtime/tests/System.Runtime.Tests/System/ValueTypeTests.cs b/src/libraries/System.Runtime/tests/System.Runtime.Tests/System/ValueTypeTests.cs
index 92a2c006ce20..92c7000ed414 100644
--- a/src/libraries/System.Runtime/tests/System.Runtime.Tests/System/ValueTypeTests.cs
+++ b/src/libraries/System.Runtime/tests/System.Runtime.Tests/System/ValueTypeTests.cs
@@ -315,6 +315,21 @@ namespace System.Tests
Assert.Equal(obj1.GetHashCode(), obj2.GetHashCode());
}
+ [Fact]
+ public static void StructWithNestedOverriddenNotBitwiseComparableTest()
+ {
+ StructWithNestedOverriddenNotBitwiseComparable obj1 = new StructWithNestedOverriddenNotBitwiseComparable();
+ obj1.value1.value = 1;
+ obj1.value2.value = 0;
+
+ StructWithNestedOverriddenNotBitwiseComparable obj2 = new StructWithNestedOverriddenNotBitwiseComparable();
+ obj2.value1.value = -1;
+ obj2.value2.value = 0;
+
+ Assert.True(obj1.Equals(obj2));
+ Assert.Equal(obj1.GetHashCode(), obj2.GetHashCode());
+ }
+
public struct S
{
public int x;
@@ -413,5 +428,20 @@ namespace System.Tests
public object o;
public StructNonOverriddenEqualsOrGetHasCode value;
}
+
+ public struct StructOverriddenNotBitwiseComparable
+ {
+ public int value;
+
+ public override bool Equals(object obj) => obj is StructOverriddenNotBitwiseComparable other && (value == other.value || value == -other.value);
+
+ public override int GetHashCode() => value < 0 ? -value : value;
+ }
+
+ public struct StructWithNestedOverriddenNotBitwiseComparable
+ {
+ public StructOverriddenNotBitwiseComparable value1;
+ public StructOverriddenNotBitwiseComparable value2;
+ }
}
}
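
The new test targets the case where ValueType.Equals cannot use its bitwise fast path: the nested struct overrides Equals so that 1 and -1 compare equal even though their bytes differ, forcing field-by-field dispatch. A sketch of the observable behavior (hypothetical types):

    using System;

    struct Sym
    {
        public int value;
        // 1 and -1 compare equal, but their bytes differ, so Sym is not bitwise comparable.
        public override bool Equals(object? obj) =>
            obj is Sym other && (value == other.value || value == -other.value);
        public override int GetHashCode() => value < 0 ? -value : value;
    }

    struct Pair // no Equals override of its own
    {
        public Sym a, b;
    }

    class Demo
    {
        static void Main()
        {
            var p1 = new Pair { a = new Sym { value = 1 } };
            var p2 = new Pair { a = new Sym { value = -1 } };

            // ValueType.Equals must call Sym.Equals per field; a raw byte
            // comparison of p1 and p2 would incorrectly report them unequal.
            Console.WriteLine(p1.Equals(p2)); // True
        }
    }
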
diff --git a/src/libraries/System.Security.Cryptography/src/Microsoft/Win32/SafeHandles/SafePasswordHandle.cs b/src/libraries/System.Security.Cryptography/src/Microsoft/Win32/SafeHandles/SafePasswordHandle.cs
index f53e582af6e2..ec3c1d6e995f 100644
--- a/src/libraries/System.Security.Cryptography/src/Microsoft/Win32/SafeHandles/SafePasswordHandle.cs
+++ b/src/libraries/System.Security.Cryptography/src/Microsoft/Win32/SafeHandles/SafePasswordHandle.cs
@@ -2,6 +2,7 @@
// The .NET Foundation licenses this file to you under the MIT license.
using System;
+using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Security;
@@ -35,8 +36,8 @@ namespace Microsoft.Win32.SafeHandles
public SafePasswordHandle(ReadOnlySpan<char> password, bool passwordProvided)
: base(ownsHandle: true)
{
- // "".AsSpan() is not default, so this is compat for "null tries NULL first".
- if (password != default)
+ // "".AsSpan() does not contain a null ref, so this is compat for "null tries NULL first".
+ if (!Unsafe.IsNullRef(ref MemoryMarshal.GetReference(password)))
{
int spanLen;
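
The replacement condition distinguishes a span whose underlying reference is null (the default span, as produced from a null string) from a genuinely empty span such as "".AsSpan(); Length alone cannot tell these apart. A small demonstration of the same check:

    using System;
    using System.Runtime.CompilerServices;
    using System.Runtime.InteropServices;

    static bool IsNullSpan(ReadOnlySpan<char> s) =>
        Unsafe.IsNullRef(ref MemoryMarshal.GetReference(s));

    Console.WriteLine(IsNullSpan(default));                  // True: default span, null reference
    Console.WriteLine(IsNullSpan("".AsSpan()));              // False: empty but backed by the string object
    Console.WriteLine(IsNullSpan(((string?)null).AsSpan())); // True: null string yields the default span
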
diff --git a/src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/HashProviderCng.cs b/src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/HashProviderCng.cs
index 831b846a7444..36f4767e9939 100644
--- a/src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/HashProviderCng.cs
+++ b/src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/HashProviderCng.cs
@@ -39,7 +39,7 @@ namespace System.Security.Cryptography
// So keep hHash trapped in this scope to prevent (mis-)use of it.
{
SafeBCryptHashHandle hHash;
- NTSTATUS ntStatus = Interop.BCrypt.BCryptCreateHash(_hAlgorithm, out hHash, IntPtr.Zero, 0, key, key == null ? 0 : key.Length, BCryptCreateHashFlags.BCRYPT_HASH_REUSABLE_FLAG);
+ NTSTATUS ntStatus = Interop.BCrypt.BCryptCreateHash(_hAlgorithm, out hHash, IntPtr.Zero, 0, key, key.Length, BCryptCreateHashFlags.BCRYPT_HASH_REUSABLE_FLAG);
if (ntStatus == NTSTATUS.STATUS_INVALID_PARAMETER)
{
hHash.Dispose();
diff --git a/src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/X509Certificates/CertificateRequest.cs b/src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/X509Certificates/CertificateRequest.cs
index c3084c964d7d..405d37188e37 100644
--- a/src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/X509Certificates/CertificateRequest.cs
+++ b/src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/X509Certificates/CertificateRequest.cs
@@ -812,7 +812,7 @@ namespace System.Security.Cryptography.X509Certificates
if (notAfter < notBefore)
throw new ArgumentException(SR.Cryptography_CertReq_DatesReversed);
- if (serialNumber == null || serialNumber.Length < 1)
+ if (serialNumber.Length < 1)
throw new ArgumentException(SR.Arg_EmptyOrNullArray, nameof(serialNumber));
byte[] signatureAlgorithm = generator.GetSignatureAlgorithmIdentifier(HashAlgorithm);
diff --git a/src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/X509Certificates/X500NameEncoder.cs b/src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/X509Certificates/X500NameEncoder.cs
index d2509eca3c3d..865b18e09518 100644
--- a/src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/X509Certificates/X500NameEncoder.cs
+++ b/src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/X509Certificates/X500NameEncoder.cs
@@ -444,7 +444,6 @@ namespace System.Security.Cryptography.X509Certificates
// then some whitespace.
case ParseState.MaybeEndQuote:
case ParseState.SeekComma:
- Debug.Assert(tagOid != null);
Debug.Assert(valueStart != -1);
Debug.Assert(valueEnd != -1);
diff --git a/src/libraries/System.Text.Json/src/System/Text/Json/Document/JsonDocument.Parse.cs b/src/libraries/System.Text.Json/src/System/Text/Json/Document/JsonDocument.Parse.cs
index e5719888d80a..f9b1fffd4243 100644
--- a/src/libraries/System.Text.Json/src/System/Text/Json/Document/JsonDocument.Parse.cs
+++ b/src/libraries/System.Text.Json/src/System/Text/Json/Document/JsonDocument.Parse.cs
@@ -165,8 +165,6 @@ namespace System.Text.Json
internal static JsonDocument ParseValue(ReadOnlySpan<byte> utf8Json, JsonDocumentOptions options)
{
- Debug.Assert(utf8Json != null);
-
byte[] owned = new byte[utf8Json.Length];
utf8Json.CopyTo(owned);
diff --git a/src/libraries/System.Text.Json/src/System/Text/Json/Document/JsonElement.cs b/src/libraries/System.Text.Json/src/System/Text/Json/Document/JsonElement.cs
index c5d687892335..1ca7fff9f7e3 100644
--- a/src/libraries/System.Text.Json/src/System/Text/Json/Document/JsonElement.cs
+++ b/src/libraries/System.Text.Json/src/System/Text/Json/Document/JsonElement.cs
@@ -5,6 +5,7 @@ using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
namespace System.Text.Json
{
@@ -1243,7 +1244,7 @@ namespace System.Text.Json
if (TokenType == JsonTokenType.Null)
{
// This is different than Length == 0, in that it tests true for null, but false for ""
- return utf8Text == default;
+ return Unsafe.IsNullRef(ref MemoryMarshal.GetReference(utf8Text));
}
return TextEqualsHelper(utf8Text, isPropertyName: false, shouldUnescape: true);
@@ -1271,7 +1272,7 @@ namespace System.Text.Json
if (TokenType == JsonTokenType.Null)
{
// This is different than Length == 0, in that it tests true for null, but false for ""
- return text == default;
+ return Unsafe.IsNullRef(ref MemoryMarshal.GetReference(text));
}
return TextEqualsHelper(text, isPropertyName: false);
diff --git a/src/libraries/System.Text.Json/tests/System.Text.Json.SourceGeneration.Unit.Tests/JsonSourceGeneratorDiagnosticsTests.cs b/src/libraries/System.Text.Json/tests/System.Text.Json.SourceGeneration.Unit.Tests/JsonSourceGeneratorDiagnosticsTests.cs
index a554d2681d43..5e5d83de2c09 100644
--- a/src/libraries/System.Text.Json/tests/System.Text.Json.SourceGeneration.Unit.Tests/JsonSourceGeneratorDiagnosticsTests.cs
+++ b/src/libraries/System.Text.Json/tests/System.Text.Json.SourceGeneration.Unit.Tests/JsonSourceGeneratorDiagnosticsTests.cs
@@ -11,6 +11,8 @@ namespace System.Text.Json.SourceGeneration.UnitTests
{
[ActiveIssue("https://github.com/dotnet/runtime/issues/58226", TestPlatforms.Browser)]
[SkipOnCoreClr("https://github.com/dotnet/runtime/issues/71962", ~RuntimeConfiguration.Release)]
+ [SkipOnMono("https://github.com/dotnet/runtime/issues/92467")]
+ [ConditionalClass(typeof(PlatformDetection), nameof(PlatformDetection.IsNotX86Process))] // https://github.com/dotnet/runtime/issues/71962
public class JsonSourceGeneratorDiagnosticsTests
{
/// <summary>
diff --git a/src/libraries/System.Text.Json/tests/System.Text.Json.SourceGeneration.Unit.Tests/JsonSourceGeneratorIncrementalTests.cs b/src/libraries/System.Text.Json/tests/System.Text.Json.SourceGeneration.Unit.Tests/JsonSourceGeneratorIncrementalTests.cs
index daa6498cbc9b..5bcb01a94bde 100644
--- a/src/libraries/System.Text.Json/tests/System.Text.Json.SourceGeneration.Unit.Tests/JsonSourceGeneratorIncrementalTests.cs
+++ b/src/libraries/System.Text.Json/tests/System.Text.Json.SourceGeneration.Unit.Tests/JsonSourceGeneratorIncrementalTests.cs
@@ -13,6 +13,8 @@ namespace System.Text.Json.SourceGeneration.UnitTests
{
[ActiveIssue("https://github.com/dotnet/runtime/issues/58226", TestPlatforms.Browser)]
[SkipOnCoreClr("https://github.com/dotnet/runtime/issues/71962", ~RuntimeConfiguration.Release)]
+ [SkipOnMono("https://github.com/dotnet/runtime/issues/92467")]
+ [ConditionalClass(typeof(PlatformDetection), nameof(PlatformDetection.IsNotX86Process))] // https://github.com/dotnet/runtime/issues/71962
public static class JsonSourceGeneratorIncrementalTests
{
[Theory]
diff --git a/src/libraries/System.Text.Json/tests/System.Text.Json.SourceGeneration.Unit.Tests/JsonSourceGeneratorTests.cs b/src/libraries/System.Text.Json/tests/System.Text.Json.SourceGeneration.Unit.Tests/JsonSourceGeneratorTests.cs
index eb6d0991585c..e2f08b988441 100644
--- a/src/libraries/System.Text.Json/tests/System.Text.Json.SourceGeneration.Unit.Tests/JsonSourceGeneratorTests.cs
+++ b/src/libraries/System.Text.Json/tests/System.Text.Json.SourceGeneration.Unit.Tests/JsonSourceGeneratorTests.cs
@@ -10,6 +10,8 @@ namespace System.Text.Json.SourceGeneration.UnitTests
{
[ActiveIssue("https://github.com/dotnet/runtime/issues/58226", TestPlatforms.Browser)]
[SkipOnCoreClr("https://github.com/dotnet/runtime/issues/71962", ~RuntimeConfiguration.Release)]
+ [SkipOnMono("https://github.com/dotnet/runtime/issues/92467")]
+ [ConditionalClass(typeof(PlatformDetection), nameof(PlatformDetection.IsNotX86Process))] // https://github.com/dotnet/runtime/issues/71962
public class GeneratorTests
{
[Fact]
diff --git a/src/libraries/System.Text.RegularExpressions/gen/RegexGenerator.Emitter.cs b/src/libraries/System.Text.RegularExpressions/gen/RegexGenerator.Emitter.cs
index 7e7fed6cab65..f0f6fc9da0e1 100644
--- a/src/libraries/System.Text.RegularExpressions/gen/RegexGenerator.Emitter.cs
+++ b/src/libraries/System.Text.RegularExpressions/gen/RegexGenerator.Emitter.cs
@@ -10,6 +10,7 @@ using System.Diagnostics.CodeAnalysis;
using System.Globalization;
using System.IO;
using System.Linq;
+using System.Runtime.CompilerServices;
using System.Security.Cryptography;
using System.Threading;
using Microsoft.CodeAnalysis;
@@ -732,6 +733,11 @@ namespace System.Text.RegularExpressions.Generator
EmitIndexOfString_RightToLeft();
break;
+ case FindNextStartingPositionMode.LeadingStrings_LeftToRight:
+ case FindNextStartingPositionMode.LeadingStrings_OrdinalIgnoreCase_LeftToRight:
+ EmitIndexOfStrings_LeftToRight();
+ break;
+
case FindNextStartingPositionMode.LeadingSet_LeftToRight:
case FindNextStartingPositionMode.FixedDistanceSets_LeftToRight:
EmitFixedSet_LeftToRight();
@@ -1041,6 +1047,37 @@ namespace System.Text.RegularExpressions.Generator
}
}
+ // Emits a case-sensitive left-to-right search for any one of multiple leading prefixes.
+ void EmitIndexOfStrings_LeftToRight()
+ {
+ RegexFindOptimizations opts = regexTree.FindOptimizations;
+ Debug.Assert(opts.FindMode is FindNextStartingPositionMode.LeadingStrings_LeftToRight or FindNextStartingPositionMode.LeadingStrings_OrdinalIgnoreCase_LeftToRight);
+
+ string prefixes = string.Join(", ", opts.LeadingPrefixes.Select(prefix => Literal(prefix)));
+ StringComparison stringComparison = opts.FindMode is FindNextStartingPositionMode.LeadingStrings_OrdinalIgnoreCase_LeftToRight ?
+ StringComparison.OrdinalIgnoreCase :
+ StringComparison.Ordinal;
+ string fieldName = GetSHA256FieldName($"s_indexOfAnyStrings_{stringComparison}_", prefixes);
+
+ if (!requiredHelpers.ContainsKey(fieldName))
+ {
+ requiredHelpers.Add(fieldName,
+ [
+ $"/// <summary>Supports searching for the specified strings.</summary>",
+ $"internal static readonly SearchValues<string> {fieldName} = SearchValues.Create([{prefixes}], StringComparison.{stringComparison});", // explicitly using an array in case prefixes is large
+ ]);
+ }
+
+ writer.WriteLine($"// The pattern has multiple strings that could begin the match. Search for any of them.");
+ writer.WriteLine($"// If none can be found, there's no match.");
+ writer.WriteLine($"int i = inputSpan.Slice(pos).IndexOfAny({HelpersTypeName}.{fieldName});");
+ using (EmitBlock(writer, "if (i >= 0)"))
+ {
+ writer.WriteLine("base.runtextpos = pos + i;");
+ writer.WriteLine("return true;");
+ }
+ }
+
// Emits a case-sensitive right-to-left search for a substring.
void EmitIndexOfString_RightToLeft()
{
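
The generated helper leans on the SearchValues<string> API added in .NET 8: the candidate prefixes are compiled into a searcher once, and IndexOfAny then finds the earliest occurrence of any of them in the input. Roughly what the emitted code does at match time (illustrative values, not the generated names):

    using System;
    using System.Buffers;

    class PrefixScan
    {
        // Built once, like the generated s_indexOfAnyStrings_* field.
        private static readonly SearchValues<string> s_prefixes =
            SearchValues.Create(["cat", "dog"], StringComparison.OrdinalIgnoreCase);

        static void Main()
        {
            ReadOnlySpan<char> input = "hot DOGs and cats";

            // Find the earliest position where any candidate prefix could start a match.
            int i = input.IndexOfAny(s_prefixes);
            Console.WriteLine(i); // 4 ("DOGs", matched case-insensitively)
        }
    }
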
@@ -1416,6 +1453,16 @@ namespace System.Text.RegularExpressions.Generator
HashSet<string> additionalDeclarations = new();
Dictionary<string, string[]> additionalLocalFunctions = new();
+ // In debug builds, additional code is emitted to validate that the backtracking stack is being maintained appropriately.
+ // When state is pushed onto the backtracking stack, an additional known value is pushed along with it, and when it's
+ // popped, the popped value is checked against that known value, throwing an exception if they don't match. This validation code
+ // is currently not part of RegexCompiler, though it could be added there in the future if desired.
+#if DEBUG
+#pragma warning disable RS1035 // Random isn't always deterministic, but this is only for debug builds, and we've seeded the Random with a constant
+ Random stackCookieGenerator = new(12345); // seed for deterministic behavior
+#pragma warning restore RS1035
+#endif
+
// Declare some locals.
string sliceSpan = "slice";
writer.WriteLine("int pos = base.runtextpos;");
@@ -1857,6 +1904,7 @@ namespace System.Text.RegularExpressions.Generator
additionalDeclarations.Add($"int {currentBranch} = 0;");
}
+ int stackCookie = CreateStackCookie();
for (int i = 0; i < childCount; i++)
{
// If the alternation isn't atomic, backtracking may require our jump table jumping back
@@ -1896,7 +1944,7 @@ namespace System.Text.RegularExpressions.Generator
// the relevant state is stored in our locals.
if (currentBranch is null)
{
- EmitStackPush(startingCapturePos is not null ?
+ EmitStackPush(stackCookie + i, startingCapturePos is not null ?
[i.ToString(), startingPos, startingCapturePos] :
[i.ToString(), startingPos]);
}
@@ -1966,11 +2014,12 @@ namespace System.Text.RegularExpressions.Generator
string switchClause;
if (currentBranch is null)
{
- // We're in a loop, so we use the backtracking stack to persist our state. Pop it off.
- EmitStackPop(startingCapturePos is not null ?
+ // We're in a loop, so we use the backtracking stack to persist our state.
+ // Pop it off and validate the stack position.
+ EmitStackPop(0, startingCapturePos is not null ?
[startingCapturePos, startingPos] :
[startingPos]);
- switchClause = StackPop();
+ switchClause = ValidateStackCookieWithAdditionAndReturnPoppedStack(stackCookie);
}
else
{
@@ -2070,6 +2119,7 @@ namespace System.Text.RegularExpressions.Generator
// We're branching in a complicated fashion. Make sure sliceStaticPos is 0.
TransferSliceStaticPosToPos();
+ int stackCookie = CreateStackCookie();
// Get the capture number to test.
int capnum = RegexParser.MapCaptureNumber(node.M, rm.Tree.CaptureNumberSparseMapping);
@@ -2201,7 +2251,7 @@ namespace System.Text.RegularExpressions.Generator
// the local.
if (isInLoop)
{
- EmitStackPop(resumeAt);
+ EmitStackPop(stackCookie, resumeAt);
}
using (EmitBlock(writer, $"switch ({resumeAt})"))
{
@@ -2230,7 +2280,7 @@ namespace System.Text.RegularExpressions.Generator
// so finish outputting our backtracking logic, which involves pushing onto the stack which
// branch to backtrack into. If we're not in a loop, though, nothing else can overwrite this local
// in the interim, so we can avoid pushing it.
- EmitStackPush(resumeAt);
+ EmitStackPush(stackCookie, resumeAt);
}
}
@@ -2298,10 +2348,19 @@ namespace System.Text.RegularExpressions.Generator
writer.WriteLine();
int startingSliceStaticPos = sliceStaticPos;
- // Emit the child. The condition expression is a zero-width assertion, which is atomic,
+ // Emit the condition. The condition expression is a zero-width assertion, which is atomic,
// so prevent backtracking into it.
writer.WriteLine("// Condition:");
- EmitNode(condition);
+ if (rm.Analysis.MayBacktrack(condition))
+ {
+ // Condition expressions are treated like positive lookarounds and thus are implicitly atomic,
+ // so we need to emit the node as atomic if it might backtrack.
+ EmitAtomic(node, null);
+ }
+ else
+ {
+ EmitNode(condition);
+ }
writer.WriteLine();
doneLabel = originalDoneLabel;
@@ -2380,11 +2439,13 @@ namespace System.Text.RegularExpressions.Generator
doneLabel = backtrack;
MarkLabel(backtrack, emitSemicolon: false);
+ int stackCookie = CreateStackCookie();
+
if (isInLoop)
{
// If we're not in a loop, the local will maintain its value until backtracking occurs.
// If we are in a loop, multiple iterations need their own value, so we need to use the stack.
- EmitStackPop(resumeAt);
+ EmitStackPop(stackCookie, resumeAt);
}
using (EmitBlock(writer, $"switch ({resumeAt})"))
@@ -2405,7 +2466,7 @@ namespace System.Text.RegularExpressions.Generator
MarkLabel(endConditional, emitSemicolon: !isInLoop);
if (isInLoop)
{
- EmitStackPush(resumeAt);
+ EmitStackPush(stackCookie, resumeAt);
}
}
}
@@ -2477,12 +2538,13 @@ namespace System.Text.RegularExpressions.Generator
// pushes/pops the starting position before falling through.
writer.WriteLine();
+ int stackCookie = CreateStackCookie();
if (isInLoop)
{
// If we're in a loop, different iterations of the loop need their own
// starting position, so push it on to the stack. If we're not in a loop,
// the local will maintain its value and will suffice.
- EmitStackPush(startingPos);
+ EmitStackPush(stackCookie, startingPos);
}
// Skip past the backtracking section
@@ -2495,7 +2557,7 @@ namespace System.Text.RegularExpressions.Generator
MarkLabel(backtrack, emitSemicolon: false);
if (isInLoop)
{
- EmitStackPop(startingPos);
+ EmitStackPop(stackCookie, startingPos);
}
Goto(doneLabel);
writer.WriteLine();
@@ -2589,6 +2651,7 @@ namespace System.Text.RegularExpressions.Generator
RegexNode child = node.Child(0);
// Ensure we're able to uncapture anything captured by the child.
+ int stackCookie = CreateStackCookie();
bool isInLoop = false;
string? capturePos = null;
bool hasCaptures = rm.Analysis.MayContainCapture(child);
@@ -2599,7 +2662,7 @@ namespace System.Text.RegularExpressions.Generator
isInLoop = rm.Analysis.IsInLoop(node);
if (isInLoop)
{
- EmitStackPush("base.Crawlpos()");
+ EmitStackPush(stackCookie, "base.Crawlpos()");
}
else
{
@@ -2637,7 +2700,15 @@ namespace System.Text.RegularExpressions.Generator
// And uncapture anything if necessary. Negative lookaround captures don't persist beyond the lookaround.
if (hasCaptures)
{
- EmitUncaptureUntil(isInLoop ? StackPop() : capturePos!);
+ if (isInLoop)
+ {
+ EmitUncaptureUntil(StackPop());
+ EmitStackCookieValidate(stackCookie);
+ }
+ else
+ {
+ EmitUncaptureUntil(capturePos!);
+ }
}
doneLabel = originalDoneLabel;
@@ -2817,8 +2888,8 @@ namespace System.Text.RegularExpressions.Generator
// Emits the node for an atomic.
void EmitAtomic(RegexNode node, RegexNode? subsequent)
{
- Debug.Assert(node.Kind is RegexNodeKind.Atomic or RegexNodeKind.PositiveLookaround or RegexNodeKind.NegativeLookaround, $"Unexpected type: {node.Kind}");
- Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}");
+ Debug.Assert(node.Kind is RegexNodeKind.Atomic or RegexNodeKind.PositiveLookaround or RegexNodeKind.NegativeLookaround or RegexNodeKind.ExpressionConditional, $"Unexpected type: {node.Kind}");
+ Debug.Assert(node.Kind is RegexNodeKind.ExpressionConditional ? node.ChildCount() >= 1 : node.ChildCount() == 1, $"Unexpected number of children: {node.ChildCount()}");
Debug.Assert(rm.Analysis.MayBacktrack(node.Child(0)), "Expected child to potentially backtrack");
// Grab the current done label and the current backtracking position. The purpose of the atomic node
@@ -3227,6 +3298,7 @@ namespace System.Text.RegularExpressions.Generator
// point we decrement the matched count as long as it's above the minimum
// required, and try again by flowing to everything that comes after this.
MarkLabel(backtrackingLabel, emitSemicolon: false);
+ int stackCookie = CreateStackCookie();
string? capturePos = null;
if (isInLoop)
{
@@ -3239,7 +3311,7 @@ namespace System.Text.RegularExpressions.Generator
{
EmitUncaptureUntil(StackPop());
}
- EmitStackPop(endingPos, startingPos);
+ EmitStackPop(stackCookie, endingPos, startingPos);
}
else if (expressionHasCaptures)
{
@@ -3294,7 +3366,7 @@ namespace System.Text.RegularExpressions.Generator
// We're in a loop and thus can't rely on locals correctly holding the state we
// need (the locals could be overwritten by a subsequent iteration). Push the state
// on to the backtracking stack.
- EmitStackPush(expressionHasCaptures ?
+ EmitStackPush(stackCookie, expressionHasCaptures ?
[startingPos, endingPos, "base.Crawlpos()"] :
[startingPos, endingPos]);
}
@@ -3535,9 +3607,10 @@ namespace System.Text.RegularExpressions.Generator
if (isInLoop)
{
writer.WriteLine();
+ int stackCookie = CreateStackCookie();
// Store the loop's state.
- EmitStackPush(
+ EmitStackPush(stackCookie,
capturePos is not null && iterationCount is not null ? [startingPos, capturePos, iterationCount] :
capturePos is not null ? [startingPos, capturePos] :
iterationCount is not null ? [startingPos, iterationCount] :
@@ -3553,7 +3626,7 @@ namespace System.Text.RegularExpressions.Generator
MarkLabel(backtrack, emitSemicolon: false);
// Restore the loop's state.
- EmitStackPop(
+ EmitStackPop(stackCookie,
capturePos is not null && iterationCount is not null ? [iterationCount, capturePos, startingPos] :
capturePos is not null ? [capturePos, startingPos] :
iterationCount is not null ? [iterationCount, startingPos] :
@@ -3640,8 +3713,13 @@ namespace System.Text.RegularExpressions.Generator
// iterations, this state needs to be stored on to the backtracking stack.
if (!isAtomic)
{
- int entriesPerIteration = 1/*pos*/ + (iterationMayBeEmpty ? 2/*startingPos+sawEmpty*/ : 0) + (expressionHasCaptures ? 1/*Crawlpos*/ : 0);
- EmitStackPush(
+ int stackCookie = CreateStackCookie();
+ int entriesPerIteration =
+ 1/*pos*/ +
+ (iterationMayBeEmpty ? 2/*startingPos+sawEmpty*/ : 0) +
+ (expressionHasCaptures ? 1/*Crawlpos*/ : 0) +
+ (stackCookie != 0 ? 1 : 0);
+ EmitStackPush(stackCookie,
expressionHasCaptures && iterationMayBeEmpty ? ["pos", startingPos!, sawEmpty!, "base.Crawlpos()"] :
iterationMayBeEmpty ? ["pos", startingPos!, sawEmpty!] :
expressionHasCaptures ? ["pos", "base.Crawlpos()"] :
@@ -3721,7 +3799,7 @@ namespace System.Text.RegularExpressions.Generator
{
EmitUncaptureUntil(StackPop());
}
- EmitStackPop(iterationMayBeEmpty ?
+ EmitStackPop(stackCookie, iterationMayBeEmpty ?
[sawEmpty!, startingPos!, "pos"] :
["pos"]);
SliceInputSpan();
@@ -3778,7 +3856,8 @@ namespace System.Text.RegularExpressions.Generator
// of another loop, then any number of iterations might have such state that needs to be stored,
// and thus it needs to be pushed on to the backtracking stack.
bool isInLoop = rm.Analysis.IsInLoop(node);
- EmitStackPush(
+ stackCookie = CreateStackCookie();
+ EmitStackPush(stackCookie,
!isInLoop ? (expressionHasCaptures ? ["pos", "base.Crawlpos()"] : ["pos"]) :
iterationMayBeEmpty ? (expressionHasCaptures ? ["pos", iterationCount, startingPos!, sawEmpty!, "base.Crawlpos()"] : ["pos", iterationCount, startingPos!, sawEmpty!]) :
expressionHasCaptures ? ["pos", iterationCount, "base.Crawlpos()"] :
@@ -3800,7 +3879,7 @@ namespace System.Text.RegularExpressions.Generator
{
EmitUncaptureUntil(StackPop());
}
- EmitStackPop(
+ EmitStackPop(stackCookie,
!isInLoop ? ["pos"] :
iterationMayBeEmpty ? [sawEmpty!, startingPos!, iterationCount, "pos"] :
[iterationCount, "pos"]);
@@ -4183,6 +4262,7 @@ namespace System.Text.RegularExpressions.Generator
int minIterations = node.M;
int maxIterations = node.N;
+ int stackCookie = CreateStackCookie();
// Special-case some repeaters.
if (minIterations == maxIterations)
@@ -4261,7 +4341,7 @@ namespace System.Text.RegularExpressions.Generator
// need to know where each iteration began so when backtracking we can jump back to that location. This is
// true even if the loop is atomic, as we might need to backtrack within the loop in order to match the
// minimum iteration count.
- EmitStackPush(
+ EmitStackPush(stackCookie,
expressionHasCaptures && iterationMayBeEmpty ? ["base.Crawlpos()", startingPos!, "pos"] :
expressionHasCaptures ? ["base.Crawlpos()", "pos"] :
iterationMayBeEmpty ? [startingPos!, "pos"] :
@@ -4371,13 +4451,14 @@ namespace System.Text.RegularExpressions.Generator
writer.WriteLine("// Unable to match the remainder of the expression after exhausting the loop.");
Goto(originalDoneLabel);
}
- EmitStackPop(iterationMayBeEmpty ?
+ EmitStackPop(0, iterationMayBeEmpty ? // stack cookie is explicitly 0 here; it is validated separately below
["pos", startingPos!] :
["pos"]);
if (expressionHasCaptures)
{
EmitUncaptureUntil(StackPop());
}
+ EmitStackCookieValidate(stackCookie);
SliceInputSpan();
// If there's a required minimum iteration count, validate now that we've processed enough iterations.
@@ -4487,7 +4568,8 @@ namespace System.Text.RegularExpressions.Generator
writer.WriteLine();
// Store the loop's state
- EmitStackPush(
+ stackCookie = CreateStackCookie();
+ EmitStackPush(stackCookie,
startingPos is not null && startingStackpos is not null ? [startingPos, startingStackpos, iterationCount] :
startingPos is not null ? [startingPos, iterationCount] :
startingStackpos is not null ? [startingStackpos, iterationCount] :
@@ -4501,7 +4583,7 @@ namespace System.Text.RegularExpressions.Generator
// Emit a backtracking section that restores the loop's state and then jumps to the previous done label
string backtrack = ReserveName("LoopBacktrack");
MarkLabel(backtrack, emitSemicolon: false);
- EmitStackPop(
+ EmitStackPop(stackCookie,
startingPos is not null && startingStackpos is not null ? [iterationCount, startingStackpos, startingPos] :
startingPos is not null ? [iterationCount, startingPos] :
startingStackpos is not null ? [iterationCount, startingStackpos] :
@@ -4552,7 +4634,7 @@ namespace System.Text.RegularExpressions.Generator
}
/// <summary>Pushes values on to the backtracking stack.</summary>
- void EmitStackPush(params string[] args)
+ void EmitStackPush(int stackCookie, params string[] args)
{
Debug.Assert(args.Length is >= 1);
@@ -4596,41 +4678,134 @@ namespace System.Text.RegularExpressions.Generator
requiredHelpers.Add(key, lines);
}
+ if (stackCookie != 0)
+ {
+ EmitStackCookie(stackCookie);
+ }
writer.WriteLine($"{HelpersTypeName}.{MethodName}(ref base.runstack!, ref stackpos, {string.Join(", ", args)});");
}
/// <summary>Pops values from the backtracking stack into the specified locations.</summary>
- void EmitStackPop(params string[] args)
+ void EmitStackPop(int stackCookie, params string[] args)
{
Debug.Assert(args.Length is >= 1);
if (args.Length == 1)
{
writer.WriteLine($"{args[0]} = {StackPop()};");
- return;
}
-
- const string MethodName = "StackPop";
- string key = $"{MethodName}{args.Length}";
-
- if (!requiredHelpers.ContainsKey(key))
+ else
{
- var lines = new string[5 + args.Length];
- lines[0] = $"/// <summary>Pops {args.Length} value{(args.Length == 1 ? "" : "s")} from the backtracking stack.</summary>";
- lines[1] = $"[MethodImpl(MethodImplOptions.AggressiveInlining)]";
- lines[2] = $"internal static void {MethodName}(int[] stack, ref int pos{FormatN(", out int arg{0}", args.Length)})";
- lines[3] = $"{{";
- for (int i = 0; i < args.Length; i++)
+ const string MethodName = "StackPop";
+ string key = $"{MethodName}{args.Length}";
+
+ if (!requiredHelpers.ContainsKey(key))
{
- lines[4 + i] = $" arg{i} = stack[--pos];";
+ var lines = new string[5 + args.Length];
+ lines[0] = $"/// <summary>Pops {args.Length} value{(args.Length == 1 ? "" : "s")} from the backtracking stack.</summary>";
+ lines[1] = $"[MethodImpl(MethodImplOptions.AggressiveInlining)]";
+ lines[2] = $"internal static void {MethodName}(int[] stack, ref int pos{FormatN(", out int arg{0}", args.Length)})";
+ lines[3] = $"{{";
+ for (int i = 0; i < args.Length; i++)
+ {
+ lines[4 + i] = $" arg{i} = stack[--pos];";
+ }
+ lines[4 + args.Length] = $"}}";
+
+ requiredHelpers.Add(key, lines);
}
- lines[4 + args.Length] = $"}}";
- requiredHelpers.Add(key, lines);
+ writer.WriteLine($"{HelpersTypeName}.{MethodName}(base.runstack!, ref stackpos, out {string.Join(", out ", args)});");
+ }
+
+ if (stackCookie != 0)
+ {
+ EmitStackCookieValidate(stackCookie);
+ }
+ }
+
+ /// <summary>Initializes a debug stack cookie for a new backtracking stack push.</summary>
+ int CreateStackCookie() =>
+#if DEBUG
+#pragma warning disable RS1035 // Random is banned from generators due to non-determinism, but this Random is seeded with a constant and it's only for debug builds
+ stackCookieGenerator.Next() + 1;
+#pragma warning restore RS1035
+#else
+ 0;
+#endif
+
+ /// <summary>Emits a debug stack cookie for a new backtracking stack push.</summary>
+ void EmitStackCookie(int stackCookie)
+ {
+#if DEBUG
+ EmitStackPush(0, stackCookie.ToString());
+#endif
+ }
+
+ /// <summary>Emits validation for a debug stack cookie.</summary>
+ void EmitStackCookieValidate(int stackCookie)
+ {
+#if DEBUG
+ writer.WriteLine($"{StackCookieValidate(stackCookie)};");
+#endif
+ }
+
+ /// <summary>
+ /// Returns an expression that:
+ /// In debug, pops item 1 from the backtracking stack, pops item 2 and validates it against the cookie, then evaluates to item 1.
+ /// In release, pops and evaluates to an item from the backtracking stack.
+ /// </summary>
+ string ValidateStackCookieWithAdditionAndReturnPoppedStack(int stackCookie)
+ {
+#if DEBUG
+ const string MethodName = "ValidateStackCookieWithAdditionAndReturnPoppedStack";
+ if (!requiredHelpers.ContainsKey(MethodName))
+ {
+ requiredHelpers.Add(MethodName,
+ [
+ $"/// <summary>Validates that a stack cookie popped off the backtracking stack holds the expected value. Debug only.</summary>",
+ $"internal static int {MethodName}(int poppedStack, int expectedCookie, int actualCookie)",
+ $"{{",
+ $" expectedCookie += poppedStack;",
+ $" if (expectedCookie != actualCookie)",
+ $" {{",
+ $" throw new Exception($\"Backtracking stack imbalance detected. Expected {{expectedCookie}}. Actual {{actualCookie}}.\");",
+ $" }}",
+ $" return poppedStack;",
+ $"}}",
+ ]);
+ }
+
+ return $"{HelpersTypeName}.{MethodName}({StackPop()}, {stackCookie}, {StackPop()})";
+#else
+ return StackPop();
+#endif
+ }
+
+#if DEBUG
+ /// <summary>Returns an expression that validates and returns a debug stack cookie.</summary>
+ string StackCookieValidate(int stackCookie)
+ {
+ const string MethodName = "ValidateStackCookie";
+ if (!requiredHelpers.ContainsKey(MethodName))
+ {
+ requiredHelpers.Add(MethodName,
+ [
+ $"/// <summary>Validates that a stack cookie popped off the backtracking stack holds the expected value. Debug only.</summary>",
+ $"internal static int {MethodName}(int expected, int actual)",
+ $"{{",
+ $" if (expected != actual)",
+ $" {{",
+ $" throw new Exception($\"Backtracking stack imbalance detected. Expected {{expected}}. Actual {{actual}}.\");",
+ $" }}",
+ $" return actual;",
+ $"}}",
+ ]);
}
- writer.WriteLine($"{HelpersTypeName}.{MethodName}(base.runstack!, ref stackpos, out {string.Join(", out ", args)});");
+ return $"{HelpersTypeName}.{MethodName}({stackCookie}, {StackPop()})";
}
+#endif
/// <summary>Expression for popping the next item from the backtracking stack.</summary>
string StackPop() => "base.runstack![--stackpos]";
diff --git a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCaseEquivalences.cs b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCaseEquivalences.cs
index 4367da61026d..70587b19a6c0 100644
--- a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCaseEquivalences.cs
+++ b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCaseEquivalences.cs
@@ -52,7 +52,7 @@ namespace System.Text.RegularExpressions
// Default
_ => default
};
- return equivalences != default;
+ return !equivalences.IsEmpty;
}
else
{
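
The replaced `!= default` comparison is subtle: ReadOnlySpan<char>'s equality operator compares the underlying reference and length rather than contents, so an empty span sliced out of real data is still not equal to default. A small sketch of the distinction, using an arbitrary sample string:

    using System;

    ReadOnlySpan<char> viaSlice = "abc".AsSpan(2, 0); // empty, but references into "abc"
    Console.WriteLine(viaSlice == default);           // False: non-null reference
    Console.WriteLine(viaSlice.IsEmpty);              // True: the condition that matters here
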
diff --git a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCharClass.cs b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCharClass.cs
index c56ad4b5b6e0..ed67df681902 100644
--- a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCharClass.cs
+++ b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCharClass.cs
@@ -1054,6 +1054,21 @@ namespace System.Text.RegularExpressions
#endif
}
+ /// <summary>Gets whether the set description string is for two ASCII letters that case to each other under OrdinalIgnoreCase rules.</summary>
+ public static bool SetContainsAsciiOrdinalIgnoreCaseCharacter(string set, Span<char> twoChars)
+ {
+ Debug.Assert(twoChars.Length >= 2);
+ return
+ !IsNegated(set) &&
+ GetSetChars(set, twoChars) == 2 &&
+ twoChars[0] < 128 &&
+ twoChars[1] < 128 &&
+ twoChars[0] != twoChars[1] &&
+ char.IsLetter(twoChars[0]) &&
+ char.IsLetter(twoChars[1]) &&
+ (twoChars[0] | 0x20) == (twoChars[1] | 0x20);
+ }
+
/// <summary>Gets whether we can iterate through the set list pairs in order to completely enumerate the set's contents.</summary>
/// <remarks>This may enumerate negated characters if the set is negated. This will return false if the set has subtraction.</remarks>
private static bool CanEasilyEnumerateSetContents(string set) =>
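
The new helper centralizes a check previously inlined in RegexNode (see the RegexNode.cs hunk below): the set must hold exactly the upper- and lowercase forms of one ASCII letter, i.e. two letters under 128 that differ only in the 0x20 case bit. A standalone sketch of that core test, under a hypothetical name:

    // Two ASCII letters are OrdinalIgnoreCase equivalents iff they differ only in the 0x20 bit.
    static bool AreAsciiCaseVariants(char a, char b) =>
        a < 128 && b < 128 &&
        a != b &&
        char.IsLetter(a) && char.IsLetter(b) &&
        (a | 0x20) == (b | 0x20);

    // AreAsciiCaseVariants('a', 'A') -> true
    // AreAsciiCaseVariants('a', 'b') -> false: different letters
    // AreAsciiCaseVariants('ä', 'Ä') -> false: non-ASCII
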
diff --git a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCompiler.cs b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCompiler.cs
index bac950c6db2f..0008f18f6418 100644
--- a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCompiler.cs
+++ b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCompiler.cs
@@ -460,6 +460,8 @@ namespace System.Text.RegularExpressions
{
case FindNextStartingPositionMode.LeadingString_LeftToRight:
case FindNextStartingPositionMode.LeadingString_OrdinalIgnoreCase_LeftToRight:
+ case FindNextStartingPositionMode.LeadingStrings_LeftToRight:
+ case FindNextStartingPositionMode.LeadingStrings_OrdinalIgnoreCase_LeftToRight:
case FindNextStartingPositionMode.FixedDistanceString_LeftToRight:
EmitIndexOfString_LeftToRight();
break;
@@ -745,15 +747,19 @@ namespace System.Text.RegularExpressions
return false;
}
- // Emits a case-sensitive left-to-right search for a substring.
+ // Emits a case-sensitive left-to-right search for a substring or substrings.
void EmitIndexOfString_LeftToRight()
{
RegexFindOptimizations opts = _regexTree.FindOptimizations;
- Debug.Assert(opts.FindMode is FindNextStartingPositionMode.LeadingString_LeftToRight or FindNextStartingPositionMode.LeadingString_OrdinalIgnoreCase_LeftToRight or FindNextStartingPositionMode.FixedDistanceString_LeftToRight);
+ Debug.Assert(opts.FindMode is FindNextStartingPositionMode.LeadingString_LeftToRight or
+ FindNextStartingPositionMode.LeadingString_OrdinalIgnoreCase_LeftToRight or
+ FindNextStartingPositionMode.FixedDistanceString_LeftToRight or
+ FindNextStartingPositionMode.LeadingStrings_LeftToRight or
+ FindNextStartingPositionMode.LeadingStrings_OrdinalIgnoreCase_LeftToRight);
using RentedLocalBuilder i = RentInt32Local();
- // int i = inputSpan.Slice(pos).IndexOf(prefix);
+ // int i = inputSpan.Slice(pos)...
Ldloca(inputSpan);
Ldloc(pos);
if (opts.FindMode is FindNextStartingPositionMode.FixedDistanceString_LeftToRight &&
@@ -763,11 +769,21 @@ namespace System.Text.RegularExpressions
Add();
}
Call(s_spanSliceIntMethod);
- string literalString = opts.FindMode is FindNextStartingPositionMode.LeadingString_LeftToRight or FindNextStartingPositionMode.LeadingString_OrdinalIgnoreCase_LeftToRight ?
- opts.LeadingPrefix :
- opts.FixedDistanceLiteral.String!;
- LoadSearchValues([literalString], opts.FindMode is FindNextStartingPositionMode.LeadingString_OrdinalIgnoreCase_LeftToRight ? StringComparison.OrdinalIgnoreCase : StringComparison.Ordinal);
- Call(s_spanIndexOfAnySearchValuesString);
+
+ // ...IndexOf(prefix);
+ if (opts.FindMode is FindNextStartingPositionMode.LeadingStrings_LeftToRight or FindNextStartingPositionMode.LeadingStrings_OrdinalIgnoreCase_LeftToRight)
+ {
+ LoadSearchValues(opts.LeadingPrefixes, opts.FindMode is FindNextStartingPositionMode.LeadingStrings_OrdinalIgnoreCase_LeftToRight ? StringComparison.OrdinalIgnoreCase : StringComparison.Ordinal);
+ Call(s_spanIndexOfAnySearchValuesString);
+ }
+ else
+ {
+ string literalString = opts.FindMode is FindNextStartingPositionMode.LeadingString_LeftToRight or FindNextStartingPositionMode.LeadingString_OrdinalIgnoreCase_LeftToRight ?
+ opts.LeadingPrefix :
+ opts.FixedDistanceLiteral.String!;
+ LoadSearchValues([literalString], opts.FindMode is FindNextStartingPositionMode.LeadingString_OrdinalIgnoreCase_LeftToRight ? StringComparison.OrdinalIgnoreCase : StringComparison.Ordinal);
+ Call(s_spanIndexOfAnySearchValuesString);
+ }
Stloc(i);
// if (i < 0) goto ReturnFalse;
@@ -2223,9 +2239,18 @@ namespace System.Text.RegularExpressions
Stloc(startingPos);
int startingSliceStaticPos = sliceStaticPos;
- // Emit the child. The condition expression is a zero-width assertion, which is atomic,
+ // Emit the condition. The condition expression is a zero-width assertion, which is atomic,
// so prevent backtracking into it.
- EmitNode(condition);
+ if (analysis.MayBacktrack(condition))
+ {
+ // Condition expressions are treated like positive lookarounds and thus are implicitly atomic,
+ // so we need to emit the node as atomic if it might backtrack.
+ EmitAtomic(node, null);
+ }
+ else
+ {
+ EmitNode(condition);
+ }
doneLabel = originalDoneLabel;
// After the condition completes successfully, reset the text positions.
@@ -2793,8 +2818,8 @@ namespace System.Text.RegularExpressions
// Emits the node for an atomic.
void EmitAtomic(RegexNode node, RegexNode? subsequent)
{
- Debug.Assert(node.Kind is RegexNodeKind.Atomic or RegexNodeKind.PositiveLookaround or RegexNodeKind.NegativeLookaround, $"Unexpected type: {node.Kind}");
- Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}");
+ Debug.Assert(node.Kind is RegexNodeKind.Atomic or RegexNodeKind.PositiveLookaround or RegexNodeKind.NegativeLookaround or RegexNodeKind.ExpressionConditional, $"Unexpected type: {node.Kind}");
+ Debug.Assert(node.Kind is RegexNodeKind.ExpressionConditional ? node.ChildCount() >= 1 : node.ChildCount() == 1, $"Unexpected number of children: {node.ChildCount()}");
RegexNode child = node.Child(0);
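
In ordinary code, the new LeadingStrings modes reduce to one vectorized multi-substring search through SearchValues<string>, the API behind s_spanIndexOfAnySearchValuesString. A sketch of the equivalent call with arbitrary prefixes, assuming the .NET 9 SearchValues.Create(ReadOnlySpan<string>, StringComparison) overload:

    using System;
    using System.Buffers;

    SearchValues<string> prefixes =
        SearchValues.Create(["abc", "def"], StringComparison.OrdinalIgnoreCase);

    ReadOnlySpan<char> input = "xyzDEFghi";
    int i = input.IndexOfAny(prefixes);
    Console.WriteLine(i); // 3: "DEF" matches "def" under OrdinalIgnoreCase
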
diff --git a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexFindOptimizations.cs b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexFindOptimizations.cs
index f40f48e35a6d..a8dc9f4fd0e5 100644
--- a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexFindOptimizations.cs
+++ b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexFindOptimizations.cs
@@ -137,7 +137,28 @@ namespace System.Text.RegularExpressions
return;
}
- // We're now left-to-right only and looking for sets.
+ // We're now left-to-right only and looking for multiple prefixes and/or sets.
+
+ // If there are multiple leading strings, we can search for any of them.
+ if (compiled)
+ {
+ if (RegexPrefixAnalyzer.FindPrefixes(root, ignoreCase: true) is { Length: > 1 } caseInsensitivePrefixes)
+ {
+ LeadingPrefixes = caseInsensitivePrefixes;
+ FindMode = FindNextStartingPositionMode.LeadingStrings_OrdinalIgnoreCase_LeftToRight;
+ return;
+ }
+
+ // TODO: While some benchmarks benefit from this significantly, others regressed a bit (in particular those with few
+ // matches). Before enabling this, we need to investigate the performance impact on real-world scenarios,
+ // and see if there are ways to reduce the impact.
+ //if (RegexPrefixAnalyzer.FindPrefixes(root, ignoreCase: false) is { Length: > 1 } caseSensitivePrefixes)
+ //{
+ // LeadingPrefixes = caseSensitivePrefixes;
+ // FindMode = FindNextStartingPositionMode.LeadingStrings_LeftToRight;
+ // return;
+ //}
+ }
// Build up a list of all of the sets that are a fixed distance from the start of the expression.
List<FixedDistanceSet>? fixedDistanceSets = RegexPrefixAnalyzer.FindFixedDistanceSets(root, thorough: !interpreter);
@@ -244,6 +265,9 @@ namespace System.Text.RegularExpressions
/// <summary>Gets the leading prefix. May be an empty string.</summary>
public string LeadingPrefix { get; } = string.Empty;
+ /// <summary>Gets the leading prefixes. May be an empty array.</summary>
+ public string[] LeadingPrefixes { get; } = Array.Empty<string>();
+
/// <summary>When in fixed distance literal mode, gets the literal and how far it is from the start of the pattern.</summary>
public (char Char, string? String, int Distance) FixedDistanceLiteral { get; }
@@ -767,10 +791,16 @@ namespace System.Text.RegularExpressions
return false;
}
+ // Not supported in the interpreter, but we could end up here for patterns so complex that the compiler gave up on them.
+
+ case FindNextStartingPositionMode.LeadingStrings_LeftToRight:
+ case FindNextStartingPositionMode.LeadingStrings_OrdinalIgnoreCase_LeftToRight:
+ return true;
+
// Nothing special to look for. Just return true indicating this is a valid position to try to match.
default:
- Debug.Assert(FindMode == FindNextStartingPositionMode.NoSearch);
+ Debug.Assert(FindMode == FindNextStartingPositionMode.NoSearch, $"Unexpected FindMode {FindMode}");
return true;
}
}
@@ -810,6 +840,11 @@ namespace System.Text.RegularExpressions
/// <summary>A multi-character ordinal case-insensitive substring at the beginning of the pattern.</summary>
LeadingString_OrdinalIgnoreCase_LeftToRight,
+ /// <summary>Multiple leading prefix strings.</summary>
+ LeadingStrings_LeftToRight,
+ /// <summary>Multiple leading ordinal case-insensitive prefix strings.</summary>
+ LeadingStrings_OrdinalIgnoreCase_LeftToRight,
+
/// <summary>A set starting the pattern.</summary>
LeadingSet_LeftToRight,
/// <summary>A set starting the right-to-left pattern.</summary>
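
To make the new path concrete: a compiled, ignore-case alternation whose branches all yield computable prefixes is exactly the shape FindPrefixes(root, ignoreCase: true) handles (this pattern appears in the unit-test data below). Whether the engine actually selects a LeadingStrings mode is an internal decision, so the snippet only illustrates the pattern shape:

    using System;
    using System.Text.RegularExpressions;

    // FindPrefixes can derive { "abc", "def" } for this pattern, letting the compiled
    // engine prefilter candidate positions with a single multi-string search.
    var r = new Regex("(?i)abc|def", RegexOptions.Compiled);
    Console.WriteLine(r.IsMatch("xxDeFxx")); // True
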
diff --git a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexNode.cs b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexNode.cs
index 5445f696423e..335f9165856f 100644
--- a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexNode.cs
+++ b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexNode.cs
@@ -2561,14 +2561,7 @@ namespace System.Text.RegularExpressions
{
// In particular we want to look for sets that contain only the upper and lowercase variant
// of the same ASCII letter.
- if (RegexCharClass.IsNegated(child.Str!) ||
- RegexCharClass.GetSetChars(child.Str!, twoChars) != 2 ||
- twoChars[0] >= 128 ||
- twoChars[1] >= 128 ||
- twoChars[0] == twoChars[1] ||
- !char.IsLetter(twoChars[0]) ||
- !char.IsLetter(twoChars[1]) ||
- ((twoChars[0] | 0x20) != (twoChars[1] | 0x20)))
+ if (!RegexCharClass.SetContainsAsciiOrdinalIgnoreCaseCharacter(child.Str!, twoChars))
{
break;
}
diff --git a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexPrefixAnalyzer.cs b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexPrefixAnalyzer.cs
index 1658e5bcdf2a..97aba89b9804 100644
--- a/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexPrefixAnalyzer.cs
+++ b/src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexPrefixAnalyzer.cs
@@ -11,6 +11,316 @@ namespace System.Text.RegularExpressions
/// <summary>Detects various forms of prefixes in the regular expression that can help FindFirstChars optimize its search.</summary>
internal static class RegexPrefixAnalyzer
{
+ /// <summary>Finds an array of multiple prefixes that a node can begin with.</summary>
+ /// <param name="node">The node to search.</param>
+ /// <param name="ignoreCase">true to find ordinal ignore-case prefixes; false for case-sensitive.</param>
+ /// <returns>
+ /// If a fixed set of prefixes is found, such that a match for this node is guaranteed to begin
+ /// with one of those prefixes, an array of those prefixes is returned. Otherwise, null.
+ /// </returns>
+ public static string[]? FindPrefixes(RegexNode node, bool ignoreCase)
+ {
+ // Minimum string length for prefixes to be useful. If any prefix has length 1,
+ // then we're generally better off just using IndexOfAny with chars.
+ const int MinPrefixLength = 2;
+
+ // Arbitrary string length limit (with some wiggle room) to avoid creating strings that are longer than is useful and consuming too much memory.
+ const int MaxPrefixLength = 8;
+
+ // Arbitrary limit on the number of prefixes to find. If we find more than this, we're likely to be spending too much time finding prefixes that won't be useful.
+ const int MaxPrefixes = 16;
+
+ // Analyze the node to find prefixes.
+ List<StringBuilder> results = [new StringBuilder()];
+ FindPrefixesCore(node, results, ignoreCase);
+
+ // If we found too many prefixes or if any found is too short, fail.
+ if (results.Count > MaxPrefixes || !results.TrueForAll(sb => sb.Length >= MinPrefixLength))
+ {
+ return null;
+ }
+
+ // Return the prefixes.
+ string[] resultStrings = new string[results.Count];
+ for (int i = 0; i < results.Count; i++)
+ {
+ resultStrings[i] = results[i].ToString();
+ }
+ return resultStrings;
+
+ // <summary>
+ // Updates the results list with found prefixes. All existing strings in the list are treated as existing
+ // discovered prefixes prior to the node being processed. The method returns true if subsequent nodes after
+ // this one should be examined, or returns false if they shouldn't be because the node wasn't guaranteed
+ // to be fully processed.
+ // </summary>
+ static bool FindPrefixesCore(RegexNode node, List<StringBuilder> results, bool ignoreCase)
+ {
+ // If we're too deep to analyze further, we can't trust what we've already computed, so stop iterating.
+ // Also bail if any of our results is already hitting the threshold, or if this node is RTL, which is
+ // not worth the complexity of handling.
+ if (!StackHelper.TryEnsureSufficientExecutionStack() ||
+ !results.TrueForAll(sb => sb.Length < MaxPrefixLength) ||
+ (node.Options & RegexOptions.RightToLeft) != 0)
+ {
+ return false;
+ }
+
+ // These limits are approximations. We'll stop trying to make strings longer once we exceed the max length,
+ // and if we exceed the max number of prefixes by a non-trivial amount, we'll fail the operation.
+ Span<char> setChars = stackalloc char[MaxPrefixes]; // limit how many chars we get from a set based on the max prefixes we care about
+
+ // Loop down the left side of the tree, looking for a starting node we can handle. We only loop through
+ // atomic and capture nodes, as the child is guaranteed to execute once, as well as loops with a positive
+ // minimum and thus at least one guaranteed iteration.
+ while (true)
+ {
+ switch (node.Kind)
+ {
+ // These nodes are all guaranteed to execute at least once, so we can just
+ // skip through them to their child.
+ case RegexNodeKind.Atomic:
+ case RegexNodeKind.Capture:
+ node = node.Child(0);
+ continue;
+
+ // Zero-width anchors and assertions don't impact a prefix and may be skipped over.
+ case RegexNodeKind.Bol:
+ case RegexNodeKind.Eol:
+ case RegexNodeKind.Boundary:
+ case RegexNodeKind.ECMABoundary:
+ case RegexNodeKind.NonBoundary:
+ case RegexNodeKind.NonECMABoundary:
+ case RegexNodeKind.Beginning:
+ case RegexNodeKind.Start:
+ case RegexNodeKind.EndZ:
+ case RegexNodeKind.End:
+ case RegexNodeKind.Empty:
+ case RegexNodeKind.UpdateBumpalong:
+ case RegexNodeKind.PositiveLookaround:
+ case RegexNodeKind.NegativeLookaround:
+ return true;
+
+ // If we hit a single character, we can just return that character.
+ // This is only relevant for case-sensitive searches, as for case-insensitive we'd have sets for anything
+ // that produces a different result when case-folded, or for strings composed entirely of characters that
+ // don't participate in case conversion. Single character loops are handled the same as single characters
+ // up to the min iteration limit. We can continue processing after them as well if they're repeaters such
+ // that their min and max are the same.
+ case RegexNodeKind.One or RegexNodeKind.Oneloop or RegexNodeKind.Onelazy or RegexNodeKind.Oneloopatomic when !ignoreCase || !RegexCharClass.ParticipatesInCaseConversion(node.Ch):
+ {
+ int reps = node.Kind is RegexNodeKind.One ? 1 : Math.Min(node.M, MaxPrefixLength);
+ foreach (StringBuilder sb in results)
+ {
+ sb.Append(node.Ch, reps);
+ }
+ return node.Kind is RegexNodeKind.One || reps == node.N;
+ }
+
+ // If we hit a string, we can just return that string.
+ // As with One above, this is only relevant for case-sensitive searches.
+ case RegexNodeKind.Multi:
+ if (!ignoreCase)
+ {
+ foreach (StringBuilder sb in results)
+ {
+ sb.Append(node.Str);
+ }
+ }
+ else
+ {
+ // If we're ignoring case, then only append up through characters that don't participate in case conversion.
+ // If there are any beyond that, we can't go further and need to stop with what we have.
+ foreach (char c in node.Str!)
+ {
+ if (RegexCharClass.ParticipatesInCaseConversion(c))
+ {
+ return false;
+ }
+
+ foreach (StringBuilder sb in results)
+ {
+ sb.Append(c);
+ }
+ }
+ }
+ return true;
+
+ // For case-sensitive, try to extract the characters that comprise it, and if there are
+ // any and there aren't more than the max number of prefixes, we can return
+ // them each as a prefix. Effectively, this is an alternation of the characters
+ // that comprise the set. For case-insensitive, we need the set to be two ASCII letters that case fold to the same thing.
+ // As with One and loops, set loops are handled the same as sets up to the min iteration limit.
+ case RegexNodeKind.Set or RegexNodeKind.Setloop or RegexNodeKind.Setlazy or RegexNodeKind.Setloopatomic when !RegexCharClass.IsNegated(node.Str!): // negated sets are too complex to analyze
+ {
+ int charCount = RegexCharClass.GetSetChars(node.Str!, setChars);
+ if (charCount == 0)
+ {
+ return false;
+ }
+
+ int reps = node.Kind is RegexNodeKind.Set ? 1 : Math.Min(node.M, MaxPrefixLength);
+ if (!ignoreCase)
+ {
+ int existingCount = results.Count;
+
+ // Duplicate all of the existing strings for all of the new suffixes, other than the first.
+ foreach (char suffix in setChars.Slice(1, charCount - 1))
+ {
+ for (int existing = 0; existing < existingCount; existing++)
+ {
+ StringBuilder newSb = new StringBuilder().Append(results[existing]);
+ newSb.Append(suffix, reps);
+ results.Add(newSb);
+ }
+ }
+
+ // Then append the first suffix to all of the existing strings.
+ for (int existing = 0; existing < existingCount; existing++)
+ {
+ results[existing].Append(setChars[0], reps);
+ }
+ }
+ else
+ {
+ // For ignore-case, we currently only handle the simple (but common) case of a set
+ // containing just the two ASCII case variants of a single letter.
+ if (!RegexCharClass.SetContainsAsciiOrdinalIgnoreCaseCharacter(node.Str!, setChars))
+ {
+ return false;
+ }
+
+ // Append it to each.
+ foreach (StringBuilder sb in results)
+ {
+ sb.Append(setChars[1], reps);
+ }
+ }
+
+ return node.Kind is RegexNodeKind.Set || reps == node.N;
+ }
+
+ case RegexNodeKind.Concatenate:
+ {
+ int childCount = node.ChildCount();
+ for (int i = 0; i < childCount; i++)
+ {
+ if (!FindPrefixesCore(node.Child(i), results, ignoreCase))
+ {
+ return false;
+ }
+ }
+ }
+ return true;
+
+ // We can append any guaranteed iterations as if they were a concatenation.
+ case RegexNodeKind.Loop or RegexNodeKind.Lazyloop when node.M > 0:
+ {
+ int limit = Math.Min(node.M, MaxPrefixLength); // MaxPrefixLength here is somewhat arbitrary, as a single loop iteration could yield multiple chars
+ for (int i = 0; i < limit; i++)
+ {
+ if (!FindPrefixesCore(node.Child(0), results, ignoreCase))
+ {
+ return false;
+ }
+ }
+ return limit == node.N;
+ }
+
+ // For alternations, we need to find a prefix for every branch; if we can't compute a
+ // prefix for any one branch, we can't trust the results and need to give up, since we don't
+ // know if our set of prefixes is complete.
+ case RegexNodeKind.Alternate:
+ {
+ // If there are more children than our maximum, just give up immediately, as we
+ // won't be able to get a prefix for every branch and have it be within our max.
+ int childCount = node.ChildCount();
+ Debug.Assert(childCount >= 2); // otherwise it would have been optimized out
+ if (childCount > MaxPrefixes)
+ {
+ return false;
+ }
+
+ // Build up the list of all prefixes across all branches.
+ List<StringBuilder>? allBranchResults = null;
+ List<StringBuilder>? alternateBranchResults = [new StringBuilder()];
+ for (int i = 0; i < childCount; i++)
+ {
+ _ = FindPrefixesCore(node.Child(i), alternateBranchResults, ignoreCase);
+
+ Debug.Assert(alternateBranchResults.Count > 0);
+ foreach (StringBuilder sb in alternateBranchResults)
+ {
+ // If a branch yields an empty prefix, then none of the other branches
+ // matter, e.g. if the pattern is abc(def|ghi|), then this would result
+ // in prefixes abcdef, abcghi, and abc, and since abc is a prefix of both
+ // abcdef and abcghi, the former two would never be used.
+ if (sb.Length == 0)
+ {
+ return false;
+ }
+ }
+
+ if (allBranchResults is null)
+ {
+ allBranchResults = alternateBranchResults;
+ alternateBranchResults = [new StringBuilder()];
+ }
+ else
+ {
+ allBranchResults.AddRange(alternateBranchResults);
+ alternateBranchResults.Clear();
+ alternateBranchResults.Add(new StringBuilder());
+ }
+ }
+
+ // At this point, we know we can successfully incorporate the alternation's results
+ // into the main results.
+
+ // If the results are currently empty (meaning a single empty StringBuilder), we can remove
+ // that builder and just replace the results with the alternation's results. We would otherwise
+ // be creating a dot product of every builder in the results with every branch's result, which
+ // is logically the same thing.
+ if (results.Count == 1 && results[0].Length == 0)
+ {
+ results.Clear();
+ results.AddRange(allBranchResults!);
+ }
+ else
+ {
+ // Duplicate all of the existing strings for all of the new suffixes, other than the first.
+ int existingCount = results.Count;
+ for (int i = 1; i < allBranchResults!.Count; i++)
+ {
+ StringBuilder suffix = allBranchResults[i];
+ for (int existing = 0; existing < existingCount; existing++)
+ {
+ StringBuilder newSb = new StringBuilder().Append(results[existing]);
+ newSb.Append(suffix);
+ results.Add(newSb);
+ }
+ }
+
+ // Then append the first suffix to all of the existing strings.
+ for (int existing = 0; existing < existingCount; existing++)
+ {
+ results[existing].Append(allBranchResults[0]);
+ }
+ }
+ }
+
+ // We don't know that we fully processed every branch, so we can't iterate through what comes after this node.
+ // The results were successfully updated, but return false to indicate that nothing after this node should be examined.
+ return false;
+
+ // Something else we don't recognize, so stop iterating.
+ default:
+ return false;
+ }
+ }
+ }
+ }
+
/// <summary>Computes the leading substring in <paramref name="node"/>; may be empty.</summary>
public static string FindPrefix(RegexNode node)
{
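
The trickiest part of FindPrefixesCore is the cross-product bookkeeping: when a set (or an alternation) contributes several alternatives, every prefix accumulated so far must be forked once per alternative. A condensed standalone sketch of that step over plain strings, in the style of the [12][45] unit tests below (hypothetical driver; the real code walks RegexNodes):

    using System;
    using System.Collections.Generic;
    using System.Text;

    List<StringBuilder> results = [new StringBuilder()];
    foreach (string setChars in new[] { "12", "45" }) // the sets of "[12][45]"
    {
        int existingCount = results.Count;

        // Duplicate every existing prefix for each set char other than the first...
        foreach (char c in setChars.AsSpan(1))
            for (int i = 0; i < existingCount; i++)
                results.Add(new StringBuilder().Append(results[i]).Append(c));

        // ...then append the first set char to the originals in place.
        for (int i = 0; i < existingCount; i++)
            results[i].Append(setChars[0]);
    }
    Console.WriteLine(string.Join(", ", results)); // 14, 24, 15, 25
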
diff --git a/src/libraries/System.Text.RegularExpressions/tests/FunctionalTests/Regex.KnownPattern.Tests.cs b/src/libraries/System.Text.RegularExpressions/tests/FunctionalTests/Regex.KnownPattern.Tests.cs
index b34ede47dd2e..993eb8019b8e 100644
--- a/src/libraries/System.Text.RegularExpressions/tests/FunctionalTests/Regex.KnownPattern.Tests.cs
+++ b/src/libraries/System.Text.RegularExpressions/tests/FunctionalTests/Regex.KnownPattern.Tests.cs
@@ -1409,6 +1409,7 @@ namespace System.Text.RegularExpressions.Tests
}
}
+ [ActiveIssue("https://github.com/dotnet/runtime/issues/98962")]
[Theory]
[MemberData(nameof(RecreationalRegex_Rectangle_MemberData))]
[OuterLoop("May take several seconds")]
diff --git a/src/libraries/System.Text.RegularExpressions/tests/FunctionalTests/RegexGeneratorOutputTests.cs b/src/libraries/System.Text.RegularExpressions/tests/FunctionalTests/RegexGeneratorOutputTests.cs
index 181c978a3766..57d4232ee1ed 100644
--- a/src/libraries/System.Text.RegularExpressions/tests/FunctionalTests/RegexGeneratorOutputTests.cs
+++ b/src/libraries/System.Text.RegularExpressions/tests/FunctionalTests/RegexGeneratorOutputTests.cs
@@ -2,15 +2,24 @@
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
+using System.Diagnostics;
using System.IO;
+using System.Linq;
+using System.Text.RegularExpressions.Generator;
using System.Threading.Tasks;
using Xunit;
namespace System.Text.RegularExpressions.Tests
{
- [ConditionalClass(typeof(PlatformDetection), nameof(PlatformDetection.IsReflectionEmitSupported), nameof(PlatformDetection.IsNotMobile), nameof(PlatformDetection.IsNotBrowser))]
+ [ConditionalClass(typeof(RegexGeneratorOutputTests), nameof(GeneratorOutputTestsSupported))]
public partial class RegexGeneratorOutputTests
{
+ public static bool GeneratorOutputTestsSupported =>
+ PlatformDetection.IsReflectionEmitSupported &&
+ PlatformDetection.IsNotMobile &&
+ PlatformDetection.IsNotBrowser &&
+ typeof(RegexGenerator).Assembly.GetCustomAttributes(false).OfType<DebuggableAttribute>().Any(da => da.IsJITTrackingEnabled); // output differs between debug and release
+
// This exists to ensure we're aware of any egregious breaks to formatting / readability.
// Any updates that impact the generated code in these baselines will need to be updated
// as changes are made to the code emitted by the generator.
@@ -258,6 +267,7 @@ namespace System.Text.RegularExpressions.Tests
loop_iteration = 0;
LoopBody:
+ Utilities.StackPush(ref base.runstack!, ref stackpos, 143337952);
Utilities.StackPush(ref base.runstack!, ref stackpos, base.Crawlpos(), pos);
loop_iteration++;
@@ -311,6 +321,7 @@ namespace System.Text.RegularExpressions.Tests
}
pos = base.runstack![--stackpos];
UncaptureUntil(base.runstack![--stackpos]);
+ Utilities.ValidateStackCookie(143337952, base.runstack![--stackpos]);
slice = inputSpan.Slice(pos);
LoopEnd:;
//}
@@ -381,6 +392,32 @@ namespace System.Text.RegularExpressions.Tests
(WordCategoriesMask & (1 << (int)CharUnicodeInfo.GetUnicodeCategory(ch))) != 0;
}
+ /// <summary>Pushes 1 value onto the backtracking stack.</summary>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ internal static void StackPush(ref int[] stack, ref int pos, int arg0)
+ {
+ // If there's space available for the value, store it.
+ int[] s = stack;
+ int p = pos;
+ if ((uint)p < (uint)s.Length)
+ {
+ s[p] = arg0;
+ pos++;
+ return;
+ }
+
+ // Otherwise, resize the stack to make room and try again.
+ WithResize(ref stack, ref pos, arg0);
+
+ // <summary>Resize the backtracking stack array and push 1 value onto the stack.</summary>
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void WithResize(ref int[] stack, ref int pos, int arg0)
+ {
+ Array.Resize(ref stack, (pos + 1) * 2);
+ StackPush(ref stack, ref pos, arg0);
+ }
+ }
+
/// <summary>Pushes 2 values onto the backtracking stack.</summary>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal static void StackPush(ref int[] stack, ref int pos, int arg0, int arg1)
@@ -407,6 +444,16 @@ namespace System.Text.RegularExpressions.Tests
StackPush(ref stack, ref pos, arg0, arg1);
}
}
+
+ /// <summary>Validates that a stack cookie popped off the backtracking stack holds the expected value. Debug only.</summary>
+ internal static int ValidateStackCookie(int expected, int actual)
+ {
+ if (expected != actual)
+ {
+ throw new Exception($"Backtracking stack imbalance detected. Expected {expected}. Actual {actual}.");
+ }
+ return actual;
+ }
}
}
"""
diff --git a/src/libraries/System.Text.RegularExpressions/tests/UnitTests/RegexPrefixAnalyzerTests.cs b/src/libraries/System.Text.RegularExpressions/tests/UnitTests/RegexPrefixAnalyzerTests.cs
index 9c592d7c57f6..783b45e9d3c9 100644
--- a/src/libraries/System.Text.RegularExpressions/tests/UnitTests/RegexPrefixAnalyzerTests.cs
+++ b/src/libraries/System.Text.RegularExpressions/tests/UnitTests/RegexPrefixAnalyzerTests.cs
@@ -70,6 +70,76 @@ namespace System.Text.RegularExpressions.Tests
FindFirstCharClass(string.Concat(Enumerable.Repeat($"(a?", nesting).Concat(Enumerable.Repeat(")*", nesting))), 0, null);
}
+ [Theory]
+ // case-sensitive
+ [InlineData("abc", new[] { "abc" }, false)]
+ [InlineData("(abc+|bcd+)", new[] { "abc", "bcd" }, false)]
+ [InlineData("(ab+c|bcd+)", new[] { "ab", "bcd" }, false)]
+ [InlineData("(ab+c|bcd+)*", null, false)]
+ [InlineData("(ab+c|bcd+)+", new[] { "ab", "bcd" }, false)]
+ [InlineData("(ab+c|bcd+){3,5}", new[] { "ab", "bcd" }, false)]
+ [InlineData("abc|def", new[] { "abc", "def" }, false)]
+ [InlineData("ab{4}c|def{5}|g{2,4}h", new[] { "abbbbc", "defffff", "gg" }, false)]
+ [InlineData("abc|def|(ghi|jklm)", new[] { "abc", "def", "ghi", "jklm" }, false)]
+ [InlineData("abc[def]ghi", new[] { "abcdghi", "abceghi", "abcfghi" }, false)]
+ [InlineData("abc[def]ghi|[jkl]m", new[] { "abcdghi", "abceghi", "abcfghi", "jm", "km", "lm" }, false)]
+ [InlineData("agggtaaa|tttaccct", new[] { "agggtaaa", "tttaccct" }, false)]
+ [InlineData("[cgt]gggtaaa|tttaccc[acg]", new[] { "cgggtaaa", "ggggtaaa", "tgggtaaa", "tttaccca", "tttacccc", "tttacccg" }, false)]
+ [InlineData("a[act]ggtaaa|tttacc[agt]t", new[] { "aaggtaaa", "acggtaaa", "atggtaaa", "tttaccat", "tttaccgt", "tttacctt" }, false)]
+ [InlineData("ag[act]gtaaa|tttac[agt]ct", new[] { "agagtaaa", "agcgtaaa", "agtgtaaa", "tttacact", "tttacgct", "tttactct" }, false)]
+ [InlineData("agg[act]taaa|ttta[agt]cct", new[] { "aggataaa", "aggctaaa", "aggttaaa", "tttaacct", "tttagcct", "tttatcct" }, false)]
+ [InlineData(@"\b(abc|def)\b", new[] { "abc", "def" }, false)]
+ [InlineData("^(abc|def)$", new[] { "abc", "def" }, false)]
+ [InlineData("abcdefg|h", null, false)]
+ [InlineData("abc[def]ghi|[jkl]", null, false)]
+ [InlineData("[12][45][789]", new[] { "147", "148", "149", "157", "158", "159", "247", "248", "249", "257", "258", "259" }, false)]
+ [InlineData("[12]a[45]b[789]c", new[] { "1a4b7c", "1a4b8c", "1a4b9c", "1a5b7c", "1a5b8c", "1a5b9c", "2a4b7c", "2a4b8c", "2a4b9c", "2a5b7c", "2a5b8c", "2a5b9c" }, false)]
+ [InlineData("(abc){3}|(def){3}", new[] { "abcabcabc", "defdefdef" }, false)]
+ [InlineData("(abc){4,8}|(def){2,3}", new[] { "abcabcabc", "defdef" }, false)]
+ [InlineData("(abc){4,8}|(de+f){2,3}", new[] { "abcabcabc", "de" }, false)]
+ [InlineData("(ab{2}c){4,8}|(de+f){2,3}", new[] { "abbcabbc", "de" }, false)]
+ // case-insensitive
+ [InlineData("[Aa][Bb][Cc]", new[] { "abc" }, true)]
+ [InlineData("[Aa][Bbc][Cc]", null, true)]
+ [InlineData(":[Aa]![Bb]@", new[] { ":a!b@" }, true)]
+ [InlineData("(?i)abc", new[] { "abc" }, true)]
+ [InlineData("(?i)(abc+|bcd+)", new[] { "abc", "bcd" }, true)]
+ [InlineData("(?i)(ab+c|bcd+)", new[] { "ab", "bcd" }, true)]
+ [InlineData("(?i)(ab+c|bcd+)*", null, true)]
+ [InlineData("(?i)(ab+c|bcd+)+", new[] { "ab", "bcd" }, true)]
+ [InlineData("(?i)(ab+c|bcd+){3,5}", new[] { "ab", "bcd" }, true)]
+ [InlineData("(?i)abc|def", new[] { "abc", "def" }, true)]
+ [InlineData("(?i)ab{4}c|def{5}|g{2,4}h", new[] { "abbbbc", "defffff", "gg" }, true)]
+ [InlineData("(?i)(((?>abc)|(?>def)))", new[] { "abc", "def" }, true)]
+ [InlineData("(?i)(abc|def|(ghi|jklm))", null, true)]
+ [InlineData("(?i)(abc|def|(ghi|jlmn))", new[] { "abc", "def", "ghi", "jlmn" }, true)]
+ [InlineData("abc", null, true)]
+ [InlineData("abc|def", null, true)]
+ [InlineData("abc|def|(ghi|jklm)", null, true)]
+ [InlineData("://[Aa][Bb]|[Cc]@!", new[] { "://ab", "c@!" }, true)]
+ [InlineData("(?i)((abc){4,8}|(def){2,3})", new[] { "abcabcab", "defdef" }, true)]
+ [InlineData("(?i)((abc){4,8}|(de+f){2,3})", new[] { "abcabcab", "de" }, true)]
+ [InlineData("(?i)((ab{2}c){4,8}|(de+f){2,3})", new[] { "abbcabbc", "de" }, true)]
+ public void FindPrefixes(string pattern, string[] expectedSet, bool ignoreCase)
+ {
+ RegexTree tree = RegexParser.Parse(pattern, RegexOptions.None, CultureInfo.InvariantCulture);
+ string[] actual = RegexPrefixAnalyzer.FindPrefixes(tree.Root, ignoreCase);
+
+ if (expectedSet is null)
+ {
+ Assert.Null(actual);
+ }
+ else
+ {
+ Assert.NotNull(actual);
+
+ Array.Sort(actual, StringComparer.Ordinal);
+ Array.Sort(expectedSet, StringComparer.Ordinal);
+
+ Assert.Equal(expectedSet, actual);
+ }
+ }
+
private static string FormatSet(string set)
{
if (set is null)
diff --git a/src/libraries/System.Threading.ThreadPool/tests/ThreadPoolTests.cs b/src/libraries/System.Threading.ThreadPool/tests/ThreadPoolTests.cs
index 0b51d5e07a6e..c96ad22b47b5 100644
--- a/src/libraries/System.Threading.ThreadPool/tests/ThreadPoolTests.cs
+++ b/src/libraries/System.Threading.ThreadPool/tests/ThreadPoolTests.cs
@@ -6,6 +6,8 @@ using System.Diagnostics;
using System.Diagnostics.Tracing;
using System.IO;
using System.Linq;
+using System.Net.Sockets;
+using System.Net;
using System.Reflection;
using System.Threading.Tasks;
using System.Threading.Tests;
@@ -1160,6 +1162,95 @@ namespace System.Threading.ThreadPools.Tests
}).Dispose();
}
+ private sealed class RuntimeEventListener : EventListener
+ {
+ private const string ClrProviderName = "Microsoft-Windows-DotNETRuntime";
+ private const EventKeywords ThreadingKeyword = (EventKeywords)0x10000;
+
+ public volatile int tpIOEnqueue = 0;
+ public volatile int tpIODequeue = 0;
+ public ManualResetEvent tpWaitIOEnqueueEvent = new ManualResetEvent(false);
+ public ManualResetEvent tpWaitIODequeueEvent = new ManualResetEvent(false);
+
+ protected override void OnEventSourceCreated(EventSource eventSource)
+ {
+ if (eventSource.Name.Equals(ClrProviderName))
+ {
+ EnableEvents(eventSource, EventLevel.Verbose, ThreadingKeyword);
+ }
+
+ base.OnEventSourceCreated(eventSource);
+ }
+
+ protected override void OnEventWritten(EventWrittenEventArgs eventData)
+ {
+ if (eventData.EventName.Equals("ThreadPoolIOEnqueue"))
+ {
+ Interlocked.Increment(ref tpIOEnqueue);
+ tpWaitIOEnqueueEvent.Set();
+ }
+ else if (eventData.EventName.Equals("ThreadPoolIODequeue"))
+ {
+ Interlocked.Increment(ref tpIODequeue);
+ tpWaitIODequeueEvent.Set();
+ }
+ }
+ }
+
+ [ConditionalFact(nameof(IsThreadingAndRemoteExecutorSupported), nameof(UseWindowsThreadPool))]
+ public void ReadWriteAsyncTest()
+ {
+ RemoteExecutor.Invoke(async () =>
+ {
+ using (RuntimeEventListener eventListener = new RuntimeEventListener())
+ {
+ TaskCompletionSource<int> portTcs = new TaskCompletionSource<int>();
+ TaskCompletionSource<bool> readAsyncReadyTcs = new TaskCompletionSource<bool>();
+
+ async Task StartListenerAsync()
+ {
+ using TcpListener listener = new TcpListener(IPAddress.Loopback, 0);
+ listener.Start();
+ int port = ((IPEndPoint)listener.LocalEndpoint).Port;
+ portTcs.SetResult(port);
+ using TcpClient client = await listener.AcceptTcpClientAsync();
+ using (NetworkStream stream = client.GetStream())
+ {
+ byte[] buffer = new byte[1];
+ Task readAsyncTask = stream.ReadAsync(buffer, 0, buffer.Length);
+ readAsyncReadyTcs.SetResult(true);
+ await readAsyncTask;
+ }
+ listener.Stop();
+ }
+
+ async Task StartClientAsync()
+ {
+ int port = await portTcs.Task;
+ using (TcpClient client = new TcpClient(new IPEndPoint(IPAddress.Loopback, 0)))
+ {
+ await client.ConnectAsync(IPAddress.Loopback, port);
+ using (NetworkStream stream = client.GetStream())
+ {
+ bool readAsyncReady = await readAsyncReadyTcs.Task;
+ byte[] data = new byte[1];
+ await stream.WriteAsync(data, 0, data.Length);
+ }
+ }
+ }
+
+ Task listenerTask = StartListenerAsync();
+ Task clientTask = StartClientAsync();
+ await Task.WhenAll(listenerTask, clientTask);
+ ManualResetEvent[] waitEvents = [eventListener.tpWaitIOEnqueueEvent, eventListener.tpWaitIODequeueEvent];
+
+ Assert.True(WaitHandle.WaitAll(waitEvents, TimeSpan.FromSeconds(15))); // Assert that there wasn't a timeout
+ Assert.True(eventListener.tpIOEnqueue > 0);
+ Assert.True(eventListener.tpIODequeue > 0);
+ }
+ }).Dispose();
+ }
+
public static bool IsThreadingAndRemoteExecutorSupported =>
PlatformDetection.IsThreadingSupported && RemoteExecutor.IsSupported;
@@ -1169,6 +1260,7 @@ namespace System.Threading.ThreadPools.Tests
return useWindowsThreadPool;
}
- private static bool UsePortableThreadPool { get; } = !GetUseWindowsThreadPool();
+ private static bool UseWindowsThreadPool { get; } = GetUseWindowsThreadPool();
+ private static bool UsePortableThreadPool { get; } = !UseWindowsThreadPool;
}
}
diff --git a/src/libraries/sendtohelixhelp.proj b/src/libraries/sendtohelixhelp.proj
index 5e7c3942e240..7c668b034712 100644
--- a/src/libraries/sendtohelixhelp.proj
+++ b/src/libraries/sendtohelixhelp.proj
@@ -240,9 +240,20 @@
</ItemGroup>
</Target>
+ <!-- XUnitLogChecker required configuration -->
<ItemGroup Condition="Exists('$(XUnitLogCheckerLibrariesOutDir)')">
<HelixCorrelationPayload Include="$(XUnitLogCheckerLibrariesOutDir)" />
- </ItemGroup>
+ <HelixCorrelationPayload Condition="'$(WindowsShell)' == 'true'" Include="dotnet-sos">
+ <Destination>sos</Destination>
+ <Uri>https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-tools/nuget/v3/flat2/dotnet-sos/$(DotnetSosVersion)/dotnet-sos.$(DotnetSosVersion).nupkg</Uri>
+ </HelixCorrelationPayload>
+ </ItemGroup>
+
+ <PropertyGroup Condition="'$(TargetOS)' == 'windows'">
+ <NtSymbolPathEnvVar>set _NT_SYMBOL_PATH=%25HELIX_CORRELATION_PAYLOAD%25%3B%25HELIX_CORRELATION_PAYLOAD%25\PDB%3B%25HELIX_CORRELATION_PAYLOAD%25\shared\$(MicrosoftNetCoreAppFrameworkName)\$(ProductVersion)</NtSymbolPathEnvVar>
+ <ExecuteDotNetSos>%25HELIX_CORRELATION_PAYLOAD%25\dotnet %25HELIX_CORRELATION_PAYLOAD%25\sos\tools\net$(DotnetSosTargetFrameworkVersion)\any\dotnet-sos.dll install --architecture $(TargetArchitecture)</ExecuteDotNetSos>
+ <HelixPreCommands>$(HelixPreCommands);$(NtSymbolPathEnvVar);$(ExecuteDotNetSos)</HelixPreCommands>
+ </PropertyGroup>
<!--
Create all the Helix data to start a set of jobs. Create a set of work items, one for each libraries
diff --git a/src/libraries/tests.proj b/src/libraries/tests.proj
index 4ab8fa568e3f..45efdee6be06 100644
--- a/src/libraries/tests.proj
+++ b/src/libraries/tests.proj
@@ -400,6 +400,9 @@
<!-- Issue: https://github.com/dotnet/runtime/issues/95795 -->
<ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime\tests\System.Globalization.Tests\Hybrid\System.Globalization.Hybrid.WASM.Tests.csproj" />
<ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime\tests\System.Globalization.Calendars.Tests\Hybrid\System.Globalization.Calendars.Hybrid.WASM.Tests.csproj" />
+ <!-- Issue: https://github.com/dotnet/runtime/issues/98406 -->
+ <!-- Issue: https://github.com/dotnet/runtime/issues/98101 -->
+ <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices.JavaScript\tests\System.Runtime.InteropServices.JavaScript.UnitTests\System.Runtime.InteropServices.JavaScript.Tests.csproj" />
</ItemGroup>
<ItemGroup Condition="'$(TargetOS)' == 'browser' and '$(WasmEnableThreads)' != 'true' and '$(RunDisabledWasmTests)' != 'true'">
@@ -447,6 +450,14 @@
<ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.Json\tests\System.Text.Json.SourceGeneration.Tests\System.Text.Json.SourceGeneration.Roslyn4.4.Tests.csproj"
Condition="'$(TargetOS)' == 'linux'" />
+ <!-- https://github.com/dotnet/runtime/issues/98795 -->
+ <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime\tests\System.Globalization.Calendars.Tests\System.Globalization.Calendars.Tests.csproj"
+ Condition="'$(TargetArchitecture)' == 'arm'" />
+
+ <!-- https://github.com/dotnet/runtime/issues/94653 -->
+ <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime\tests\System.Threading.Tasks.Tests\System.Threading.Tasks.Tests.csproj"
+ Condition="'$(TargetsLinuxMusl)' == 'true'" />
+
<!-- Not applicable to NativeAOT -->
<ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.HostFactoryResolver\tests\Microsoft.Extensions.HostFactoryResolver.Tests.csproj" />
<ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.Loader\tests\DefaultContext\System.Runtime.Loader.DefaultContext.Tests.csproj" />
@@ -607,7 +618,9 @@
<!-- Don't want the default smoke tests - just verify that threading works -->
<SmokeTestProject Remove="@(SmokeTestProject)" />
<SmokeTestProject Include="$(MonoProjectRoot)sample\wasm\browser-threads\Wasm.Browser.Threads.Sample.csproj" />
- <SmokeTestProject Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices.JavaScript\tests\System.Runtime.InteropServices.JavaScript.UnitTests\System.Runtime.InteropServices.JavaScript.Tests.csproj" />
+ <!-- Issue: https://github.com/dotnet/runtime/issues/98406 -->
+ <!-- Issue: https://github.com/dotnet/runtime/issues/98101 -->
+ <!-- <SmokeTestProject Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices.JavaScript\tests\System.Runtime.InteropServices.JavaScript.UnitTests\System.Runtime.InteropServices.JavaScript.Tests.csproj" /> -->
<SmokeTestProject Include="$(MSBuildThisFileDirectory)System.Net.WebSockets.Client\tests\System.Net.WebSockets.Client.Tests.csproj" />
<SmokeTestProject Include="$(MSBuildThisFileDirectory)System.Net.Http\tests\FunctionalTests\System.Net.Http.Functional.Tests.csproj" />
</ItemGroup>
@@ -758,7 +771,7 @@
BuildInParallel="$(Samples_BuildInParallel)" />
</ItemGroup>
- <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(BuildTargetFramework)' == '$(NetCoreAppCurrent)' and '$(IsXUnitLogCheckerSupported)' == 'true'">
+ <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(TargetFrameworkIdentifier)' == '.NETCoreApp' and '$(IsXUnitLogCheckerSupported)' == 'true'">
<ProjectReference
Include="$(RepoRoot)src\tests\Common\XUnitLogChecker\XUnitLogChecker.csproj"
AdditionalProperties="%(AdditionalProperties);Configuration=Release;OutDir=$(XUnitLogCheckerLibrariesOutDir)" />
diff --git a/src/mono/System.Private.CoreLib/System.Private.CoreLib.csproj b/src/mono/System.Private.CoreLib/System.Private.CoreLib.csproj
index 1c085ed36e44..c273e6ba4172 100644
--- a/src/mono/System.Private.CoreLib/System.Private.CoreLib.csproj
+++ b/src/mono/System.Private.CoreLib/System.Private.CoreLib.csproj
@@ -246,7 +246,6 @@
<Compile Include="$(BclSourcesRoot)\System\Reflection\Metadata\AssemblyExtensions.cs" />
<Compile Include="$(BclSourcesRoot)\System\Reflection\Metadata\MetadataUpdater.cs" />
<Compile Include="$(BclSourcesRoot)\System\Reflection\TypeNameParser.Mono.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Resources\ManifestBasedResourceGroveler.Mono.cs" />
<Compile Include="$(BclSourcesRoot)\System\Runtime\ControlledExecution.Mono.cs" />
<Compile Include="$(BclSourcesRoot)\System\Runtime\DependentHandle.cs" />
<Compile Include="$(BclSourcesRoot)\System\Runtime\GCFrameRegistration.Mono.cs" />
@@ -264,7 +263,6 @@
Condition="'$(FeatureObjCMarshal)' == 'true'"/>
<Compile Include="$(BclSourcesRoot)\System\Runtime\Intrinsics\X86\X86Base.Mono.cs" />
<Compile Include="$(BclSourcesRoot)\System\Runtime\Loader\AssemblyLoadContext.Mono.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Security\DynamicSecurityMethodAttribute.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\Interlocked.Mono.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\Monitor.Mono.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\ObjectHeader.Mono.cs" />
diff --git a/src/mono/System.Private.CoreLib/src/System/Buffer.Mono.cs b/src/mono/System.Private.CoreLib/src/System/Buffer.Mono.cs
index 8f45f602e6fb..bcb9b6b38f2e 100644
--- a/src/mono/System.Private.CoreLib/src/System/Buffer.Mono.cs
+++ b/src/mono/System.Private.CoreLib/src/System/Buffer.Mono.cs
@@ -23,7 +23,7 @@ namespace System
{
#pragma warning disable 8500 // sizeof of managed types
// Blittable memmove
- Memmove(
+ SpanHelpers.Memmove(
ref Unsafe.As<T, byte>(ref destination),
ref Unsafe.As<T, byte>(ref source),
elementCount * (nuint)sizeof(T));
diff --git a/src/mono/System.Private.CoreLib/src/System/Resources/ManifestBasedResourceGroveler.Mono.cs b/src/mono/System.Private.CoreLib/src/System/Resources/ManifestBasedResourceGroveler.Mono.cs
deleted file mode 100644
index 54871091da0f..000000000000
--- a/src/mono/System.Private.CoreLib/src/System/Resources/ManifestBasedResourceGroveler.Mono.cs
+++ /dev/null
@@ -1,16 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System.Globalization;
-using System.Reflection;
-
-namespace System.Resources
-{
- internal partial class ManifestBasedResourceGroveler
- {
- private static Assembly? InternalGetSatelliteAssembly(Assembly mainAssembly, CultureInfo culture, Version? version)
- {
- return (RuntimeAssembly.InternalGetSatelliteAssembly(mainAssembly, culture, version, throwOnFileNotFound: false));
- }
- }
-}
diff --git a/src/mono/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.Mono.cs b/src/mono/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.Mono.cs
index 391bfaec6a2a..ca9ecdbe9f82 100644
--- a/src/mono/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.Mono.cs
+++ b/src/mono/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.Mono.cs
@@ -4,6 +4,7 @@
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Diagnostics.Tracing;
+using System.Runtime.InteropServices;
using System.Runtime.Serialization;
namespace System.Runtime.CompilerServices
@@ -137,9 +138,15 @@ namespace System.Runtime.CompilerServices
RunModuleConstructor(module.Value);
}
- public static IntPtr AllocateTypeAssociatedMemory(Type type, int size)
+ public static unsafe IntPtr AllocateTypeAssociatedMemory(Type type, int size)
{
- throw new PlatformNotSupportedException();
+ if (type is not RuntimeType)
+ throw new ArgumentException(SR.Arg_MustBeType, nameof(type));
+
+ ArgumentOutOfRangeException.ThrowIfNegative(size);
+
+ // We don't support unloading; the memory will never be freed.
+ return (IntPtr)NativeMemory.AllocZeroed((uint)size);
}
[Intrinsic]
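
With this change the API is usable on Mono: per the implementation above, it returns zero-initialized native memory that is never freed (no unloading support). A small usage sketch:

    using System;
    using System.Runtime.CompilerServices;
    using System.Runtime.InteropServices;

    IntPtr mem = RuntimeHelpers.AllocateTypeAssociatedMemory(typeof(object), 64);
    Marshal.WriteInt32(mem, 0, 123);
    Console.WriteLine(Marshal.ReadInt32(mem, 0)); // 123
    Console.WriteLine(Marshal.ReadInt32(mem, 4)); // 0: AllocZeroed zero-initializes
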
diff --git a/src/mono/System.Private.CoreLib/src/System/Security/DynamicSecurityMethodAttribute.cs b/src/mono/System.Private.CoreLib/src/System/Security/DynamicSecurityMethodAttribute.cs
deleted file mode 100644
index e3dae854517e..000000000000
--- a/src/mono/System.Private.CoreLib/src/System/Security/DynamicSecurityMethodAttribute.cs
+++ /dev/null
@@ -1,18 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-namespace System.Security
-{
- // DynamicSecurityMethodAttribute:
- // All methods that use StackCrawlMark should be marked with this attribute. This attribute
- // disables inlining of the calling method to allow stackwalking to find the exact caller.
- //
- // This attribute used to indicate that the target method requires space for a security object
- // to be allocated on the callers stack. It is not used for this purpose anymore because of security
- // stackwalks are not ever done in CoreCLR.
- [AttributeUsage(AttributeTargets.Method | AttributeTargets.Constructor, AllowMultiple = true, Inherited = false)]
- internal sealed class DynamicSecurityMethodAttribute : Attribute
- {
- public DynamicSecurityMethodAttribute() { }
- }
-}
diff --git a/src/mono/System.Private.CoreLib/src/System/String.Mono.cs b/src/mono/System.Private.CoreLib/src/System/String.Mono.cs
index 7314504aff9a..7dedf5a6e536 100644
--- a/src/mono/System.Private.CoreLib/src/System/String.Mono.cs
+++ b/src/mono/System.Private.CoreLib/src/System/String.Mono.cs
@@ -116,7 +116,7 @@ namespace System
private static unsafe void memcpy(byte* dest, byte* src, int size)
{
- Buffer.Memmove(ref *dest, ref *src, (nuint)size);
+ SpanHelpers.Memmove(ref *dest, ref *src, (nuint)size);
}
/* Used by the runtime */
diff --git a/src/mono/browser/debugger/DebuggerTestSuite/DebuggerTestSuite.csproj b/src/mono/browser/debugger/DebuggerTestSuite/DebuggerTestSuite.csproj
index 87cfb17bbc04..7283603a955e 100644
--- a/src/mono/browser/debugger/DebuggerTestSuite/DebuggerTestSuite.csproj
+++ b/src/mono/browser/debugger/DebuggerTestSuite/DebuggerTestSuite.csproj
@@ -81,7 +81,7 @@
BeforeTargets="CopyTestZipForHelix"
DependsOnTargets="_GenerateRunSettingsFile">
- <Exec Command="&quot;$(DotNetTool)&quot; test --no-build -s $(RunSettingsFilePath) -t --nologo -v:q" ConsoleToMSBuild="true">
+ <Exec Command="&quot;$(DotNetTool)&quot; test --no-build -s $(RunSettingsFilePath) -t --nologo -v:q -p:VsTestUseMSBuildOutput=false" ConsoleToMSBuild="true">
<Output TaskParameter="ConsoleOutput" ItemName="_ListOfTestsLines" />
</Exec>
@@ -89,12 +89,15 @@
<_Regex>^ *(DebuggerTests[^\($]+)</_Regex>
</PropertyGroup>
<ItemGroup>
- <_TestLines0 Include="$([System.Text.RegularExpressions.Regex]::Match('%(_ListOfTestsLines.Identity)', $(_Regex)))" />
- <TestClassName Include="$([System.IO.Path]::GetFileNameWithoutExtension(%(_TestLines0.Identity)))" />
+ <_TestLines0 Include="$([System.Text.RegularExpressions.Regex]::Match('%(_ListOfTestsLines.Identity)', '$(_Regex)'))" />
+ <TestClassName Include="$([System.IO.Path]::GetFileNameWithoutExtension('%(_TestLines0.Identity)'))" />
</ItemGroup>
+ <Error Text="No DebuggerTests test classes found!" Condition="'@(TestClassName)' == ''" />
+
<WriteLinesToFile File="$(TestArchiveTestsDir)$(MSBuildProjectName).tests.list"
- Lines="@(TestClassName->Distinct())" />
+ Lines="@(TestClassName->Distinct())"
+ Overwrite="true" />
</Target>
<!-- Copy of `GenerateRunSettingsFile` from eng/testing/runsettings.targets -->
diff --git a/src/mono/browser/runtime/debug.ts b/src/mono/browser/runtime/debug.ts
index 74c0128f2e4e..1cbe85aff718 100644
--- a/src/mono/browser/runtime/debug.ts
+++ b/src/mono/browser/runtime/debug.ts
@@ -1,8 +1,6 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
-import BuildConfiguration from "consts:configuration";
-
import { INTERNAL, Module, loaderHelpers, runtimeHelpers } from "./globals";
import { toBase64StringImpl } from "./base64";
import cwraps from "./cwraps";
@@ -364,11 +362,6 @@ export function mono_wasm_debugger_log(level: number, message_ptr: CharPtr): voi
INTERNAL.logging.debugger(level, message);
return;
}
-
- if (BuildConfiguration === "Debug") {
- // eslint-disable-next-line no-console
- console.debug(`Debugger.Debug: ${message}`);
- }
}
type CallDetails = {
diff --git a/src/mono/browser/runtime/exports-binding.ts b/src/mono/browser/runtime/exports-binding.ts
index 3e65378483df..986e6f89a9c5 100644
--- a/src/mono/browser/runtime/exports-binding.ts
+++ b/src/mono/browser/runtime/exports-binding.ts
@@ -31,6 +31,7 @@ import {
mono_wasm_pthread_on_pthread_attached, mono_wasm_pthread_on_pthread_unregistered,
mono_wasm_pthread_on_pthread_registered, mono_wasm_pthread_set_name, mono_wasm_install_js_worker_interop, mono_wasm_uninstall_js_worker_interop
} from "./pthreads";
+import { mono_wasm_dump_threads } from "./pthreads/ui-thread";
// the JS methods would be visible to EMCC linker and become imports of the WASM module
@@ -45,6 +46,8 @@ export const mono_wasm_threads_imports = !WasmEnableThreads ? [] : [
// threads.c
mono_wasm_eventloop_has_unsettled_interop_promises,
+ // mono-threads.c
+ mono_wasm_dump_threads,
// diagnostics_server.c
mono_wasm_diagnostic_server_on_server_thread_created,
mono_wasm_diagnostic_server_on_runtime_server_init,
diff --git a/src/mono/browser/runtime/exports-internal.ts b/src/mono/browser/runtime/exports-internal.ts
index 80c52669fb3a..61c6e1fe6e3f 100644
--- a/src/mono/browser/runtime/exports-internal.ts
+++ b/src/mono/browser/runtime/exports-internal.ts
@@ -23,14 +23,14 @@ import { mono_wasm_get_func_id_to_name_mappings } from "./logging";
import { monoStringToStringUnsafe } from "./strings";
import { mono_wasm_bind_cs_function } from "./invoke-cs";
-import { dumpThreads, thread_available } from "./pthreads";
+import { mono_wasm_dump_threads, thread_available } from "./pthreads";
export function export_internal(): any {
return {
// tests
mono_wasm_exit: (exit_code: number) => { Module.err("early exit " + exit_code); },
forceDisposeProxies,
- dumpThreads: WasmEnableThreads ? dumpThreads : undefined,
+ mono_wasm_dump_threads: WasmEnableThreads ? mono_wasm_dump_threads : undefined,
// with mono_wasm_debugger_log and mono_wasm_trace_logger
logging: undefined,
diff --git a/src/mono/browser/runtime/exports.ts b/src/mono/browser/runtime/exports.ts
index 8b2984f393b1..2a4dc08320a0 100644
--- a/src/mono/browser/runtime/exports.ts
+++ b/src/mono/browser/runtime/exports.ts
@@ -10,7 +10,7 @@ import WasmEnableExceptionHandling from "consts:wasmEnableExceptionHandling";
import type { RuntimeAPI } from "./types";
import { Module, exportedRuntimeAPI, loaderHelpers, passEmscriptenInternals, runtimeHelpers, setRuntimeGlobals, } from "./globals";
-import { GlobalObjects } from "./types/internal";
+import { GlobalObjects, RuntimeHelpers } from "./types/internal";
import { configureEmscriptenStartup, configureRuntimeStartup, configureWorkerStartup } from "./startup";
import { create_weak_ref } from "./weak-ref";
@@ -22,7 +22,7 @@ import { mono_wasm_stringify_as_error_with_stack } from "./logging";
import { instantiate_asset, instantiate_symbols_asset, instantiate_segmentation_rules_asset } from "./assets";
import { jiterpreter_dump_stats } from "./jiterpreter";
import { forceDisposeProxies } from "./gc-handles";
-import { dumpThreads } from "./pthreads";
+import { mono_wasm_dump_threads } from "./pthreads";
export let runtimeList: RuntimeList;
@@ -32,19 +32,19 @@ function initializeExports(globalObjects: GlobalObjects): RuntimeAPI {
const globalThisAny = globalThis as any;
Object.assign(globals.internal, export_internal());
- Object.assign(runtimeHelpers, {
+ const rh: Partial<RuntimeHelpers> = {
stringify_as_error_with_stack: mono_wasm_stringify_as_error_with_stack,
instantiate_symbols_asset,
instantiate_asset,
jiterpreter_dump_stats,
forceDisposeProxies,
instantiate_segmentation_rules_asset,
- });
+
+ };
if (WasmEnableThreads) {
- Object.assign(runtimeHelpers, {
- dumpThreads,
- });
+ rh.dumpThreads = mono_wasm_dump_threads;
}
+ Object.assign(runtimeHelpers, rh);
const API = export_api();
Object.assign(exportedRuntimeAPI, {
diff --git a/src/mono/browser/runtime/jiterpreter-support.ts b/src/mono/browser/runtime/jiterpreter-support.ts
index 998056d0aa14..6ca884459d95 100644
--- a/src/mono/browser/runtime/jiterpreter-support.ts
+++ b/src/mono/browser/runtime/jiterpreter-support.ts
@@ -1148,7 +1148,7 @@ class Cfg {
blockStack: Array<MintOpcodePtr> = [];
backDispatchOffsets: Array<MintOpcodePtr> = [];
dispatchTable = new Map<MintOpcodePtr, number>();
- observedBranchTargets = new Set<MintOpcodePtr>();
+ observedBackBranchTargets = new Set<MintOpcodePtr>();
trace = 0;
constructor(builder: WasmBuilder) {
@@ -1165,7 +1165,7 @@ class Cfg {
this.lastSegmentEnd = 0;
this.overheadBytes = 10; // epilogue
this.dispatchTable.clear();
- this.observedBranchTargets.clear();
+ this.observedBackBranchTargets.clear();
this.trace = trace;
this.backDispatchOffsets.length = 0;
}
@@ -1212,7 +1212,9 @@ class Cfg {
}
branch(target: MintOpcodePtr, isBackward: boolean, branchType: CfgBranchType) {
- this.observedBranchTargets.add(target);
+ if (isBackward)
+ this.observedBackBranchTargets.add(target);
+
this.appendBlob();
this.segments.push({
type: "branch",
@@ -1224,7 +1226,10 @@ class Cfg {
// some branches will generate bailouts instead so we allocate 4 bytes per branch
// to try and balance this out and avoid underestimating too much
this.overheadBytes += 4; // forward branches are a constant br + depth (optimally 2 bytes)
+
if (isBackward) {
+ // TODO: Make this smaller by setting the flag inside the dispatcher when disp != 0,
+ // this will save space for any trace with more than one back-branch
// get_local <cinfo>
// i32_const 1
// i32_store 0 0
@@ -1298,7 +1303,7 @@ class Cfg {
const breakDepth = this.blockStack.indexOf(offset);
if (breakDepth < 0)
continue;
- if (!this.observedBranchTargets.has(offset))
+ if (!this.observedBackBranchTargets.has(offset))
continue;
this.dispatchTable.set(offset, this.backDispatchOffsets.length + 1);
diff --git a/src/mono/browser/runtime/jiterpreter-trace-generator.ts b/src/mono/browser/runtime/jiterpreter-trace-generator.ts
index a2edb8838202..9b3002eae107 100644
--- a/src/mono/browser/runtime/jiterpreter-trace-generator.ts
+++ b/src/mono/browser/runtime/jiterpreter-trace-generator.ts
@@ -142,6 +142,8 @@ export function generateBackwardBranchTable(
// IP of the start of the trace in U16s, relative to startOfBody.
const rbase16 = (<any>ip - <any>startOfBody) / 2;
+ // FIXME: This will potentially scan the entire method and record branches that won't
+ // ever run since the trace compilation will end before we reach them.
while (ip < endOfBody) {
// IP of the current opcode in U16s, relative to startOfBody. This is what the back branch table uses
const rip16 = (<any>ip - <any>startOfBody) / 2;
@@ -166,16 +168,23 @@ export function generateBackwardBranchTable(
break;
}
- const rtarget16 = rip16 + (displacement);
- if (rtarget16 < 0) {
- mono_log_info(`opcode @${ip}'s displacement of ${displacement} goes before body: ${rtarget16}. aborting backbranch table generation`);
- break;
- }
+ // Only record *backward* branches
+ // The Cfg will filter this down further, since it records which branches it actually sees,
+ // but a null table (produced further down) when there are no potential back-branch targets
+ // at all is also valuable: it lets the Cfg skip the extra code generation entirely when it
+ // knows a given trace can never contain a backwards branch
+ if (displacement < 0) {
+ const rtarget16 = rip16 + (displacement);
+ if (rtarget16 < 0) {
+ mono_log_info(`opcode @${ip}'s displacement of ${displacement} goes before body: ${rtarget16}. aborting backbranch table generation`);
+ break;
+ }
- // If the relative target is before the start of the trace, don't record it.
- // The trace will be unable to successfully branch to it so it would just make the table bigger.
- if (rtarget16 >= rbase16)
- table.push(rtarget16);
+ // If the relative target is before the start of the trace, don't record it.
+ // The trace will be unable to successfully branch to it so it would just make the table bigger.
+ if (rtarget16 >= rbase16)
+ table.push(rtarget16);
+ }
switch (opcode) {
case MintOpcode.MINT_CALL_HANDLER:
@@ -369,7 +378,20 @@ export function generateWasmBody(
builder.callImport("localloc");
break;
}
- case MintOpcode.MINT_INITOBJ: {
+ case MintOpcode.MINT_ZEROBLK: {
+ // dest
+ append_ldloc(builder, getArgU16(ip, 1), WasmOpcode.i32_load);
+ // value
+ builder.i32_const(0);
+ // count
+ append_ldloc(builder, getArgU16(ip, 2), WasmOpcode.i32_load);
+ // memset
+ builder.appendU8(WasmOpcode.PREFIX_sat);
+ builder.appendU8(11);
+ builder.appendU8(0);
+ break;
+ }
+ case MintOpcode.MINT_ZEROBLK_IMM: {
append_ldloc(builder, getArgU16(ip, 1), WasmOpcode.i32_load);
append_memset_dest(builder, 0, getArgU16(ip, 2));
break;
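
The new MINT_ZEROBLK case zeroes a dynamically sized block by emitting wasm `memory.fill`: the 0xFC prefix (PREFIX_sat), sub-opcode 11 (0x0B), and a memory index of 0, with dest/value/count already on the value stack. A standalone sketch of that byte sequence, using a hypothetical helper rather than the real WasmBuilder:

    // Sketch: encode `memory.fill` from the wasm bulk-memory proposal.
    // Stack on entry: [dest: i32, value: i32, count: i32].
    const PREFIX_SAT = 0xfc;   // shared prefix for saturating/bulk-memory opcodes
    const MEMORY_FILL = 0x0b;  // sub-opcode 11
    const MEMORY_INDEX = 0x00; // module memory 0

    function emitMemoryFill(out: number[]): void {
        out.push(PREFIX_SAT, MEMORY_FILL, MEMORY_INDEX);
    }

    const bytes: number[] = [];
    emitMemoryFill(bytes);
    console.log(bytes.map(b => b.toString(16))); // ["fc", "b", "0"]
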
diff --git a/src/mono/browser/runtime/jiterpreter.ts b/src/mono/browser/runtime/jiterpreter.ts
index 9d47c2e39b1e..5ef86e182905 100644
--- a/src/mono/browser/runtime/jiterpreter.ts
+++ b/src/mono/browser/runtime/jiterpreter.ts
@@ -891,7 +891,7 @@ function generate_wasm(
// suites or benchmarks if you've enabled stats
const tracesCompiled = getCounter(JiterpCounter.TracesCompiled);
if (builder.options.enableStats && tracesCompiled && (tracesCompiled % autoDumpInterval) === 0)
- jiterpreter_dump_stats(false, true);
+ jiterpreter_dump_stats(true);
return idx;
} catch (exc: any) {
@@ -1074,14 +1074,14 @@ export function mono_jiterp_free_method_data_js(
mono_jiterp_free_method_data_jit_call(method);
}
-export function jiterpreter_dump_stats(b?: boolean, concise?: boolean) {
+export function jiterpreter_dump_stats(concise?: boolean): void {
if (!runtimeHelpers.runtimeReady) {
return;
}
- if (!mostRecentOptions || (b !== undefined))
+ if (!mostRecentOptions)
mostRecentOptions = getOptions();
- if (!mostRecentOptions.enableStats && (b !== undefined))
+ if (!mostRecentOptions.enableStats)
return;
const backBranchesEmitted = getCounter(JiterpCounter.BackBranchesEmitted),
@@ -1243,10 +1243,4 @@ export function jiterpreter_dump_stats(b?: boolean, concise?: boolean) {
for (const k in simdFallbackCounters)
mono_log_info(`// simd ${k}: ${simdFallbackCounters[k]} fallback insn(s)`);
-
- if ((typeof (globalThis.setTimeout) === "function") && (b !== undefined))
- setTimeout(
- () => jiterpreter_dump_stats(b),
- 15000
- );
}
diff --git a/src/mono/browser/runtime/pthreads/index.ts b/src/mono/browser/runtime/pthreads/index.ts
index a97f0f0f0608..c3128d2be284 100644
--- a/src/mono/browser/runtime/pthreads/index.ts
+++ b/src/mono/browser/runtime/pthreads/index.ts
@@ -6,7 +6,7 @@ export {
mono_wasm_pthread_ptr, update_thread_info, isMonoThreadMessage, monoThreadInfo,
} from "./shared";
export {
- dumpThreads, thread_available, cancelThreads, is_thread_available,
+ mono_wasm_dump_threads, thread_available, cancelThreads, is_thread_available,
populateEmscriptenPool, mono_wasm_init_threads, init_finalizer_thread,
waitForThread, replaceEmscriptenPThreadUI
} from "./ui-thread";
diff --git a/src/mono/browser/runtime/pthreads/ui-thread.ts b/src/mono/browser/runtime/pthreads/ui-thread.ts
index c7fb54a66e48..baa72e7a3b97 100644
--- a/src/mono/browser/runtime/pthreads/ui-thread.ts
+++ b/src/mono/browser/runtime/pthreads/ui-thread.ts
@@ -182,7 +182,7 @@ export function cancelThreads() {
}
}
-export function dumpThreads(): void {
+export function mono_wasm_dump_threads(): void {
if (!WasmEnableThreads) return;
mono_log_info("Dumping web worker info as seen by UI thread, it could be stale: ");
const emptyInfo: PThreadInfo = {
@@ -278,7 +278,7 @@ export function replaceEmscriptenPThreadUI(modulePThread: PThreadLibrary): void
}
};
if (BuildConfiguration === "Debug") {
- (globalThis as any).dumpThreads = dumpThreads;
+ (globalThis as any).dumpThreads = mono_wasm_dump_threads;
(globalThis as any).getModulePThread = getModulePThread;
}
}
diff --git a/src/mono/browser/runtime/types/internal.ts b/src/mono/browser/runtime/types/internal.ts
index 4c9fa7babc62..7d4eb89d0f3c 100644
--- a/src/mono/browser/runtime/types/internal.ts
+++ b/src/mono/browser/runtime/types/internal.ts
@@ -235,7 +235,7 @@ export type RuntimeHelpers = {
instantiate_asset: (asset: AssetEntry, url: string, bytes: Uint8Array) => void,
instantiate_symbols_asset: (pendingAsset: AssetEntryInternal) => Promise<void>,
instantiate_segmentation_rules_asset: (pendingAsset: AssetEntryInternal) => Promise<void>,
- jiterpreter_dump_stats?: (x: boolean) => string,
+ jiterpreter_dump_stats?: (concise?: boolean) => void,
forceDisposeProxies: (disposeMethods: boolean, verbose: boolean) => void,
dumpThreads: () => void,
}
diff --git a/src/mono/mono/mini/aot-compiler.c b/src/mono/mono/mini/aot-compiler.c
index d8f80b0bc6a9..48de41e36bd9 100644
--- a/src/mono/mono/mini/aot-compiler.c
+++ b/src/mono/mono/mini/aot-compiler.c
@@ -448,6 +448,7 @@ static MonoAotCompile *llvm_acfg;
static MonoAotCompile *current_acfg;
static MonoAssembly *dedup_assembly;
static GHashTable *dedup_methods;
+static GPtrArray *dedup_methods_list;
/* Cache of decoded method external icall symbol names. */
/* Owned by acfg, but kept in this static as well since it is */
@@ -4351,11 +4352,13 @@ collect_dedup_method (MonoAotCompile *acfg, MonoMethod *method)
return TRUE;
// Remember for later
g_assert (acfg->dedup_phase == DEDUP_COLLECT);
- if (!g_hash_table_lookup (dedup_methods, method))
+ if (!g_hash_table_lookup (dedup_methods, method)) {
g_hash_table_insert (dedup_methods, method, method);
- else
+ g_ptr_array_add (dedup_methods_list, method);
+ } else {
// Already processed when compiling another assembly
return TRUE;
+ }
}
return FALSE;
}
@@ -15099,13 +15102,10 @@ aot_assembly (MonoAssembly *ass, guint32 jit_opts, MonoAotOptions *aot_options)
/* Add collected dedup-able methods */
aot_printf (acfg, "Adding %d dedup-ed methods.\n", g_hash_table_size (dedup_methods));
- GHashTableIter iter;
- MonoMethod *key;
- MonoMethod *method;
-
- g_hash_table_iter_init (&iter, dedup_methods);
- while (g_hash_table_iter_next (&iter, (gpointer *)&key, (gpointer *)&method))
+ for (guint i = 0; i < dedup_methods_list->len; ++i) {
+ MonoMethod *method = (MonoMethod*)g_ptr_array_index (dedup_methods_list, i);
add_method_full (acfg, method, TRUE, 0);
+ }
}
{
@@ -15570,6 +15570,7 @@ mono_aot_assemblies (MonoAssembly **assemblies, int nassemblies, guint32 jit_opt
assemblies [dedup_aindex] = atmp;
dedup_methods = g_hash_table_new (NULL, NULL);
+ dedup_methods_list = g_ptr_array_new ();
}
if (aot_opts.trimming_eligible_methods_outfile) {
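
GHashTable iteration order is unspecified, so emitting the dedup-ed methods straight from the table could make AOT output vary from run to run; the added GPtrArray preserves first-seen order while the table keeps membership checks cheap. The same set-plus-ordered-list pattern, sketched in TypeScript with hypothetical types:

    // Sketch: pair a membership set with an insertion-ordered array so that
    // later iteration is deterministic regardless of hash order.
    interface Method { name: string }

    const seen = new Set<Method>();
    const ordered: Method[] = [];

    function collect(m: Method): void {
        if (!seen.has(m)) {
            seen.add(m);
            ordered.push(m); // remember in first-seen order
        }
    }

    const a = { name: "A" }, b = { name: "B" };
    collect(a); collect(b); collect(a); // duplicate ignored
    for (const m of ordered)
        console.log(`add_method_full(${m.name})`); // A, then B; stable
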
diff --git a/src/mono/mono/mini/exceptions-ppc.c b/src/mono/mono/mini/exceptions-ppc.c
index 146fece23692..c7537a258acf 100644
--- a/src/mono/mono/mini/exceptions-ppc.c
+++ b/src/mono/mono/mini/exceptions-ppc.c
@@ -838,5 +838,8 @@ mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
ctx->regs[2] = (gulong)handler_ftnptr->toc;
#else
MONO_CONTEXT_SET_IP(ctx, (unsigned long) func);
+#ifdef TARGET_POWERPC64
+ ctx->regs[12] = (gulong)func;
+#endif
#endif
}
diff --git a/src/mono/mono/mini/interp/interp.c b/src/mono/mono/mini/interp/interp.c
index c16afc494011..070278d2cd0e 100644
--- a/src/mono/mono/mini/interp/interp.c
+++ b/src/mono/mono/mini/interp/interp.c
@@ -7618,7 +7618,11 @@ MINT_IN_CASE(MINT_BRTRUE_I8_SP) ZEROP_SP(gint64, !=); MINT_IN_BREAK;
/* top of stack is result of filter */
frame->retval->data.i = LOCAL_VAR (ip [1], gint32);
goto exit_clause;
- MINT_IN_CASE(MINT_INITOBJ)
+ MINT_IN_CASE(MINT_ZEROBLK)
+ memset (LOCAL_VAR (ip [1], gpointer), 0, LOCAL_VAR (ip [2], gsize));
+ ip += 3;
+ MINT_IN_BREAK;
+ MINT_IN_CASE(MINT_ZEROBLK_IMM)
memset (LOCAL_VAR (ip [1], gpointer), 0, ip [2]);
ip += 3;
MINT_IN_BREAK;
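
The split mirrors the two zeroing shapes: MINT_ZEROBLK reads the byte count from a second local (needed when the length is only known at run time), while MINT_ZEROBLK_IMM keeps the old initobj behavior of a size encoded in the instruction. A minimal TypeScript stand-in for the C dispatch, with hypothetical names:

    // Sketch: the two zeroing opcodes differ only in where `size` comes from.
    const enum Op { ZEROBLK, ZEROBLK_IMM }

    function execZero(op: Op, mem: Uint8Array, locals: Int32Array, ins: Uint16Array): void {
        const dest = locals[ins[1]];
        const size = op === Op.ZEROBLK
            ? locals[ins[2]] // dynamic: byte count read from a second local
            : ins[2];        // immediate: byte count baked into the opcode stream
        mem.fill(0, dest, dest + size); // the C side uses memset
    }

    const mem = new Uint8Array(16).fill(0xff);
    execZero(Op.ZEROBLK_IMM, mem, Int32Array.of(0, 4), Uint16Array.of(0, 1, 8));
    console.log(mem[4], mem[11], mem[12]); // 0 0 255
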
diff --git a/src/mono/mono/mini/interp/jiterpreter-opcode-values.h b/src/mono/mono/mini/interp/jiterpreter-opcode-values.h
index bf9965dc148a..fb677b438d38 100644
--- a/src/mono/mono/mini/interp/jiterpreter-opcode-values.h
+++ b/src/mono/mono/mini/interp/jiterpreter-opcode-values.h
@@ -132,7 +132,8 @@ OP(MINT_INTRINS_MEMORYMARSHAL_GETARRAYDATAREF, HIGH)
OP(MINT_INITLOCAL, MASSIVE)
OP(MINT_INITLOCALS, MASSIVE)
OP(MINT_LOCALLOC, NORMAL)
-OP(MINT_INITOBJ, MASSIVE)
+OP(MINT_ZEROBLK, MASSIVE)
+OP(MINT_ZEROBLK_IMM, HIGH)
OP(MINT_INTRINS_RUNTIMEHELPERS_OBJECT_HAS_COMPONENT_SIZE, HIGH)
OP(MINT_INTRINS_ENUM_HASFLAG, HIGH)
OP(MINT_INTRINS_ORDINAL_IGNORE_CASE_ASCII, HIGH)
diff --git a/src/mono/mono/mini/interp/mintops.def b/src/mono/mono/mini/interp/mintops.def
index d45579ce40fe..87108ca73bff 100644
--- a/src/mono/mono/mini/interp/mintops.def
+++ b/src/mono/mono/mini/interp/mintops.def
@@ -363,7 +363,8 @@ OPDEF(MINT_NEWOBJ_STRING, "newobj_string", 4, 1, 1, MintOpMethodToken)
OPDEF(MINT_NEWOBJ, "newobj", 5, 1, 1, MintOpMethodToken)
OPDEF(MINT_NEWOBJ_INLINED, "newobj_inlined", 3, 1, 0, MintOpVTableToken)
OPDEF(MINT_NEWOBJ_VT, "newobj_vt", 5, 1, 1, MintOpMethodToken)
-OPDEF(MINT_INITOBJ, "initobj", 3, 0, 1, MintOpShortInt)
+OPDEF(MINT_ZEROBLK, "zeroblk", 3, 0, 2, MintOpNoArgs)
+OPDEF(MINT_ZEROBLK_IMM, "zeroblk_imm", 3, 0, 1, MintOpShortInt)
OPDEF(MINT_CASTCLASS, "castclass", 4, 1, 1, MintOpClassToken)
OPDEF(MINT_ISINST, "isinst", 4, 1, 1, MintOpClassToken)
OPDEF(MINT_CASTCLASS_INTERFACE, "castclass.interface", 4, 1, 1, MintOpClassToken)
@@ -655,9 +656,6 @@ OPDEF(MINT_RET_I8_IMM, "ret.i8.imm", 2, 0, 0, MintOpShortInt)
OPDEF(MINT_ADD_I4_IMM, "add.i4.imm", 4, 1, 1, MintOpShortInt)
OPDEF(MINT_ADD_I8_IMM, "add.i8.imm", 4, 1, 1, MintOpShortInt)
-OPDEF(MINT_ADD_MUL_I4_IMM, "add.mul.i4.imm", 5, 1, 1, MintOpTwoShorts)
-OPDEF(MINT_ADD_MUL_I8_IMM, "add.mul.i8.imm", 5, 1, 1, MintOpTwoShorts)
-
OPDEF(MINT_MUL_I4_IMM, "mul.i4.imm", 4, 1, 1, MintOpShortInt)
OPDEF(MINT_MUL_I8_IMM, "mul.i8.imm", 4, 1, 1, MintOpShortInt)
@@ -668,6 +666,9 @@ OPDEF(MINT_SHL_I8_IMM, "shl.i8.imm", 4, 1, 1, MintOpShortInt)
OPDEF(MINT_SHR_I4_IMM, "shr.i4.imm", 4, 1, 1, MintOpShortInt)
OPDEF(MINT_SHR_I8_IMM, "shr.i8.imm", 4, 1, 1, MintOpShortInt)
+OPDEF(MINT_ADD_MUL_I4_IMM, "add.mul.i4.imm", 5, 1, 1, MintOpTwoShorts)
+OPDEF(MINT_ADD_MUL_I8_IMM, "add.mul.i8.imm", 5, 1, 1, MintOpTwoShorts)
+
OPDEF(MINT_SHL_AND_I4, "shl.i4.and", 4, 1, 2, MintOpNoArgs)
OPDEF(MINT_SHL_AND_I8, "shl.i8.and", 4, 1, 2, MintOpNoArgs)
diff --git a/src/mono/mono/mini/interp/mintops.h b/src/mono/mono/mini/interp/mintops.h
index 27e3821dbccf..a50a7d409215 100644
--- a/src/mono/mono/mini/interp/mintops.h
+++ b/src/mono/mono/mini/interp/mintops.h
@@ -224,6 +224,7 @@ typedef enum {
#define MINT_IS_LDC_I8(op) ((op) >= MINT_LDC_I8_0 && (op) <= MINT_LDC_I8)
#define MINT_IS_UNOP(op) ((op) >= MINT_ADD1_I4 && (op) <= MINT_CEQ0_I4)
#define MINT_IS_BINOP(op) ((op) >= MINT_ADD_I4 && (op) <= MINT_CLT_UN_R8)
+#define MINT_IS_BINOP_IMM(op) ((op) >= MINT_ADD_I4_IMM && (op) <= MINT_SHR_I8_IMM)
#define MINT_IS_BINOP_SHIFT(op) ((op) >= MINT_SHR_UN_I4 && (op) <= MINT_SHR_I8)
#define MINT_IS_LDFLD(op) ((op) >= MINT_LDFLD_I1 && (op) <= MINT_LDFLD_O)
#define MINT_IS_STFLD(op) ((op) >= MINT_STFLD_I1 && (op) <= MINT_STFLD_O)
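
MINT_IS_BINOP_IMM is a range check, so the layout in mintops.def is load-bearing: moving the MINT_ADD_MUL_*_IMM definitions after the shifts keeps the [MINT_ADD_I4_IMM, MINT_SHR_I8_IMM] range covering only the single-immediate binops that the new constant folding handles, while the two-immediate add-mul ops stay outside it. A simplified sketch (hypothetical, abbreviated opcode list):

    // Sketch: a range-based opcode classifier; it silently breaks if a new
    // single-immediate binop is defined outside [ADD_I4_IMM, SHR_I8_IMM].
    const enum Mint {
        ADD_I4_IMM, ADD_I8_IMM,
        MUL_I4_IMM, MUL_I8_IMM,
        SHL_I4_IMM, SHL_I8_IMM,
        SHR_I4_IMM,
        SHR_I8_IMM,     // end of the foldable single-immediate range
        ADD_MUL_I4_IMM, // two immediates: deliberately outside the range
        ADD_MUL_I8_IMM,
    }

    const isBinopImm = (op: Mint): boolean =>
        op >= Mint.ADD_I4_IMM && op <= Mint.SHR_I8_IMM;

    console.log(isBinopImm(Mint.MUL_I8_IMM));     // true
    console.log(isBinopImm(Mint.ADD_MUL_I4_IMM)); // false
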
diff --git a/src/mono/mono/mini/interp/transform-opt.c b/src/mono/mono/mini/interp/transform-opt.c
index 0e4e7be42065..1b5e2b7a026b 100644
--- a/src/mono/mono/mini/interp/transform-opt.c
+++ b/src/mono/mono/mini/interp/transform-opt.c
@@ -1959,7 +1959,8 @@ interp_reorder_bblocks (TransformData *td)
InterpInst *last_ins = interp_last_ins (in_bb);
if (last_ins && (MINT_IS_CONDITIONAL_BRANCH (last_ins->opcode) ||
MINT_IS_UNCONDITIONAL_BRANCH (last_ins->opcode)) &&
- last_ins->info.target_bb == bb) {
+ last_ins->info.target_bb == bb &&
+ in_bb != bb) {
InterpBasicBlock *target_bb = first->info.target_bb;
last_ins->info.target_bb = target_bb;
interp_unlink_bblocks (in_bb, bb);
@@ -2129,6 +2130,12 @@ interp_get_mt_for_ldind (int ldind_op)
result.field = op val->field; \
break;
+#define INTERP_FOLD_SHIFTOP_IMM(opcode,local_type,field,shift_op,cast_type) \
+ case opcode: \
+ result.type = local_type; \
+ result.field = (cast_type)val->field shift_op ins->data [0]; \
+ break;
+
#define INTERP_FOLD_CONV(opcode,val_type_dst,field_dst,val_type_src,field_src,cast_type) \
case opcode: \
result.type = val_type_dst; \
@@ -2168,6 +2175,19 @@ interp_fold_unop (TransformData *td, InterpInst *ins)
INTERP_FOLD_UNOP (MINT_NOT_I8, VAR_VALUE_I8, l, ~);
INTERP_FOLD_UNOP (MINT_CEQ0_I4, VAR_VALUE_I4, i, 0 ==);
+ INTERP_FOLD_UNOP (MINT_ADD_I4_IMM, VAR_VALUE_I4, i, ((gint32)(gint16)ins->data [0])+);
+ INTERP_FOLD_UNOP (MINT_ADD_I8_IMM, VAR_VALUE_I8, l, ((gint64)(gint16)ins->data [0])+);
+
+ INTERP_FOLD_UNOP (MINT_MUL_I4_IMM, VAR_VALUE_I4, i, ((gint32)(gint16)ins->data [0])*);
+ INTERP_FOLD_UNOP (MINT_MUL_I8_IMM, VAR_VALUE_I8, l, ((gint64)(gint16)ins->data [0])*);
+
+ INTERP_FOLD_SHIFTOP_IMM (MINT_SHR_UN_I4_IMM, VAR_VALUE_I4, i, >>, guint32);
+ INTERP_FOLD_SHIFTOP_IMM (MINT_SHR_UN_I8_IMM, VAR_VALUE_I8, l, >>, guint64);
+ INTERP_FOLD_SHIFTOP_IMM (MINT_SHL_I4_IMM, VAR_VALUE_I4, i, <<, gint32);
+ INTERP_FOLD_SHIFTOP_IMM (MINT_SHL_I8_IMM, VAR_VALUE_I8, l, <<, gint64);
+ INTERP_FOLD_SHIFTOP_IMM (MINT_SHR_I4_IMM, VAR_VALUE_I4, i, >>, gint32);
+ INTERP_FOLD_SHIFTOP_IMM (MINT_SHR_I8_IMM, VAR_VALUE_I8, l, >>, gint64);
+
INTERP_FOLD_CONV (MINT_CONV_I1_I4, VAR_VALUE_I4, i, VAR_VALUE_I4, i, gint8);
INTERP_FOLD_CONV (MINT_CONV_I1_I8, VAR_VALUE_I4, i, VAR_VALUE_I8, l, gint8);
INTERP_FOLD_CONV (MINT_CONV_U1_I4, VAR_VALUE_I4, i, VAR_VALUE_I4, i, guint8);
@@ -2901,7 +2921,7 @@ retry_instruction:
td->var_values [dreg].type = VAR_VALUE_I4;
td->var_values [dreg].i = (gint32)td->data_items [ins->data [0]];
#endif
- } else if (MINT_IS_UNOP (opcode)) {
+ } else if (MINT_IS_UNOP (opcode) || MINT_IS_BINOP_IMM (opcode)) {
ins = interp_fold_unop (td, ins);
} else if (MINT_IS_UNOP_CONDITIONAL_BRANCH (opcode)) {
ins = interp_fold_unop_cond_br (td, bb, ins);
@@ -3021,12 +3041,12 @@ retry_instruction:
interp_dump_ins (ins, td->data_items);
}
}
- } else if (opcode == MINT_INITOBJ) {
+ } else if (opcode == MINT_ZEROBLK_IMM) {
InterpInst *ldloca = get_var_value_def (td, sregs [0]);
if (ldloca != NULL && ldloca->opcode == MINT_LDLOCA_S) {
int size = ins->data [0];
int local = ldloca->sregs [0];
- // Replace LDLOCA + INITOBJ with or LDC
+ // Replace LDLOCA + ZEROBLK_IMM with LDC
if (size <= 4)
ins->opcode = MINT_LDC_I4_0;
else if (size <= 8)
@@ -3037,7 +3057,7 @@ retry_instruction:
ins->dreg = local;
if (td->verbose_level) {
- g_print ("Replace ldloca/initobj pair :\n\t");
+ g_print ("Replace ldloca/zeroblk pair :\n\t");
interp_dump_ins (ins, td->data_items);
}
}
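
INTERP_FOLD_SHIFTOP_IMM differs from the plain unop folding in two ways: the shift amount comes from the instruction's immediate (ins->data [0]) rather than a variable, and the value is cast first so signed and unsigned right shifts fold differently. A minimal sketch of the 32-bit cases in TypeScript:

    // Sketch: fold a shift-by-immediate; the cast decides arithmetic vs logical.
    const enum Op { SHL_I4_IMM, SHR_I4_IMM, SHR_UN_I4_IMM }

    function foldShiftImm(op: Op, value: number, imm: number): number {
        switch (op) {
            case Op.SHL_I4_IMM:    return (value << imm) | 0; // gint32 wrap
            case Op.SHR_I4_IMM:    return value >> imm;       // arithmetic (gint32 cast)
            case Op.SHR_UN_I4_IMM: return value >>> imm;      // logical (guint32 cast)
        }
    }

    console.log(foldShiftImm(Op.SHR_I4_IMM, -8, 1));    // -4
    console.log(foldShiftImm(Op.SHR_UN_I4_IMM, -8, 1)); // 2147483644
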
diff --git a/src/mono/mono/mini/interp/transform.c b/src/mono/mono/mini/interp/transform.c
index ac654631313f..d791ea9d579e 100644
--- a/src/mono/mono/mini/interp/transform.c
+++ b/src/mono/mono/mini/interp/transform.c
@@ -1938,9 +1938,21 @@ interp_handle_intrinsics (TransformData *td, MonoMethod *target_method, MonoClas
}
} else if (in_corlib &&
!strcmp (klass_name_space, "System") &&
- !strcmp (klass_name, "SpanHelpers") &&
- !strcmp (tm, "ClearWithReferences")) {
- *op = MINT_INTRINS_CLEAR_WITH_REFERENCES;
+ !strcmp (klass_name, "SpanHelpers")) {
+ if (!strcmp (tm, "ClearWithReferences")) {
+ *op = MINT_INTRINS_CLEAR_WITH_REFERENCES;
+ } else if (!strcmp (tm, "ClearWithoutReferences")) {
+ *op = MINT_ZEROBLK;
+ } else if (!strcmp (tm, "Fill") && csignature->param_count == 3) {
+ int align;
+ if (mono_type_size (csignature->params [2], &align) == 1) {
+ interp_add_ins (td, MINT_INITBLK);
+ td->sp -= 3;
+ interp_ins_set_sregs3 (td->last_ins, td->sp [0].var, td->sp [2].var, td->sp [1].var);
+ td->ip += 5;
+ return TRUE;
+ }
+ }
} else if (in_corlib && !strcmp (klass_name_space, "System") && !strcmp (klass_name, "Marvin")) {
if (!strcmp (tm, "Block")) {
InterpInst *ldloca2 = td->last_ins;
@@ -8125,7 +8137,7 @@ generate_code (TransformData *td, MonoMethod *method, MonoMethodHeader *header,
CHECK_TYPELOAD (klass);
if (m_class_is_valuetype (klass)) {
--td->sp;
- interp_add_ins (td, MINT_INITOBJ);
+ interp_add_ins (td, MINT_ZEROBLK_IMM);
interp_ins_set_sreg (td->last_ins, td->sp [0].var);
i32 = mono_class_value_size (klass, NULL);
g_assert (i32 < G_MAXUINT16);
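
The SpanHelpers block now recognizes three shapes: ClearWithReferences keeps its dedicated intrinsic, ClearWithoutReferences lowers to the new MINT_ZEROBLK, and Fill lowers to MINT_INITBLK only when the element type is one byte, since only then can the fill value be written as a raw repeated byte. A sketch of that decision with a hypothetical helper:

    // Sketch: choose an interp opcode for a recognized SpanHelpers call.
    function lowerSpanHelpers(method: string, elementSize?: number): string | null {
        switch (method) {
            case "ClearWithReferences":    return "MINT_INTRINS_CLEAR_WITH_REFERENCES";
            case "ClearWithoutReferences": return "MINT_ZEROBLK";
            case "Fill":
                // only a 1-byte element can be written as a raw repeated byte
                return elementSize === 1 ? "MINT_INITBLK" : null;
            default:                       return null; // not an intrinsic; emit a normal call
        }
    }

    console.log(lowerSpanHelpers("Fill", 1)); // MINT_INITBLK
    console.log(lowerSpanHelpers("Fill", 4)); // null
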
diff --git a/src/mono/mono/utils/mono-threads-wasm.c b/src/mono/mono/utils/mono-threads-wasm.c
index 1746c7213e96..ca11139ce43b 100644
--- a/src/mono/mono/utils/mono-threads-wasm.c
+++ b/src/mono/mono/utils/mono-threads-wasm.c
@@ -541,9 +541,15 @@ mono_threads_wasm_on_thread_registered (void)
}
#ifndef DISABLE_THREADS
+static pthread_t deputy_thread_tid;
extern void mono_wasm_start_deputy_thread_async (void);
extern void mono_wasm_trace_logger (const char *log_domain, const char *log_level, const char *message, mono_bool fatal, void *user_data);
-static pthread_t deputy_thread_tid;
+extern void mono_wasm_dump_threads (void);
+
+void mono_wasm_dump_threads_async (void)
+{
+ mono_threads_wasm_async_run_in_target_thread (mono_threads_wasm_ui_thread_tid (), mono_wasm_dump_threads);
+}
gboolean
mono_threads_wasm_is_deputy_thread (void)
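
mono_wasm_dump_threads lives on the UI thread, which owns the worker bookkeeping the TypeScript side prints, so the wrapper marshals the call there rather than invoking it in place. The shape of that hand-off, sketched with a plain callback registry (hypothetical names; the runtime uses mono_threads_wasm_async_run_in_target_thread):

    // Sketch: run a zero-argument callback on a specific target thread by
    // posting it there instead of calling it synchronously on the requester.
    type ThreadId = number;
    const handlers = new Map<ThreadId, (fn: () => void) => void>();

    function registerThread(tid: ThreadId, post: (fn: () => void) => void): void {
        handlers.set(tid, post);
    }

    function asyncRunInTargetThread(tid: ThreadId, fn: () => void): void {
        const post = handlers.get(tid);
        if (!post) throw new Error(`unknown thread ${tid}`);
        post(fn); // queued; runs later on the target thread's event loop
    }

    // Usage: request a dump on the UI thread (tid 1) from any other thread.
    registerThread(1, fn => setTimeout(fn, 0));
    asyncRunInTargetThread(1, () => console.log("dumping threads on UI thread"));
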
diff --git a/src/mono/mono/utils/mono-threads-wasm.h b/src/mono/mono/utils/mono-threads-wasm.h
index 13668709357d..1aa68ee7d329 100644
--- a/src/mono/mono/utils/mono-threads-wasm.h
+++ b/src/mono/mono/utils/mono-threads-wasm.h
@@ -28,6 +28,9 @@ mono_threads_wasm_ui_thread_tid (void);
#ifndef DISABLE_THREADS
+void
+mono_wasm_dump_threads_async (void);
+
gboolean
mono_threads_wasm_is_deputy_thread (void);
diff --git a/src/mono/mono/utils/mono-threads.c b/src/mono/mono/utils/mono-threads.c
index 515cde6eebad..14a00bc91542 100644
--- a/src/mono/mono/utils/mono-threads.c
+++ b/src/mono/mono/utils/mono-threads.c
@@ -281,6 +281,12 @@ mono_threads_end_global_suspend (void)
static void
dump_threads (void)
{
+#ifdef HOST_BROWSER
+#ifndef DISABLE_THREADS
+ mono_wasm_dump_threads_async ();
+#endif
+#endif
+
MonoThreadInfo *cur = mono_thread_info_current ();
g_async_safe_printf ("STATE CUE CARD: (? means a positive number, usually 1 or 2, * means any number)\n");
diff --git a/src/mono/nuget/Microsoft.NET.Sdk.WebAssembly.Pack/build/Microsoft.NET.Sdk.WebAssembly.Browser.targets b/src/mono/nuget/Microsoft.NET.Sdk.WebAssembly.Pack/build/Microsoft.NET.Sdk.WebAssembly.Browser.targets
index c51c6b88f6bc..7da061e35b9b 100644
--- a/src/mono/nuget/Microsoft.NET.Sdk.WebAssembly.Pack/build/Microsoft.NET.Sdk.WebAssembly.Browser.targets
+++ b/src/mono/nuget/Microsoft.NET.Sdk.WebAssembly.Pack/build/Microsoft.NET.Sdk.WebAssembly.Browser.targets
@@ -223,7 +223,7 @@ Copyright (c) .NET Foundation. All rights reserved.
<PropertyGroup>
<_WasmNativeAssetFileNames>;@(WasmNativeAsset->'%(FileName)%(Extension)');</_WasmNativeAssetFileNames>
</PropertyGroup>
-
+
<ItemGroup>
<_WasmConfigFileCandidates Include="@(StaticWebAsset)" Condition="'%(SourceType)' == 'Discovered'" />
diff --git a/src/mono/sample/wasm/blazor-frame/blazor.csproj b/src/mono/sample/wasm/blazor-frame/blazor.csproj
index 5592718e0714..3061f77e83ac 100644
--- a/src/mono/sample/wasm/blazor-frame/blazor.csproj
+++ b/src/mono/sample/wasm/blazor-frame/blazor.csproj
@@ -11,8 +11,8 @@
<ItemGroup>
<!-- TODO un-pin this when it's possible -->
- <PackageReference Include="Microsoft.AspNetCore.Components.WebAssembly" Version="9.0.0-alpha.1.24061.8" />
- <PackageReference Include="Microsoft.AspNetCore.Components.WebAssembly.DevServer" Version="9.0.0-alpha.1.24061.8" PrivateAssets="all" />
+ <PackageReference Include="Microsoft.AspNetCore.Components.WebAssembly" Version="9.0.0-preview.1.24081.5" />
+ <PackageReference Include="Microsoft.AspNetCore.Components.WebAssembly.DevServer" Version="9.0.0-preview.1.24081.5" PrivateAssets="all" />
</ItemGroup>
</Project>
diff --git a/src/mono/wasm/Wasm.Build.Tests/Blazor/AppsettingsTests.cs b/src/mono/wasm/Wasm.Build.Tests/Blazor/AppsettingsTests.cs
index 8069ef424f18..db0607d226a8 100644
--- a/src/mono/wasm/Wasm.Build.Tests/Blazor/AppsettingsTests.cs
+++ b/src/mono/wasm/Wasm.Build.Tests/Blazor/AppsettingsTests.cs
@@ -46,7 +46,7 @@ public class AppsettingsTests : BlazorWasmTestBase
await BlazorRunForBuildWithDotnetRun(new BlazorRunOptions()
{
Config = "debug",
- OnConsoleMessage = msg =>
+ OnConsoleMessage = (_, msg) =>
{
if (msg.Text.Contains("appSettings Exists 'True'"))
existsChecked = true;
diff --git a/src/mono/wasm/Wasm.Build.Tests/Blazor/BlazorRunOptions.cs b/src/mono/wasm/Wasm.Build.Tests/Blazor/BlazorRunOptions.cs
index 683524eac322..1b8145c69ae7 100644
--- a/src/mono/wasm/Wasm.Build.Tests/Blazor/BlazorRunOptions.cs
+++ b/src/mono/wasm/Wasm.Build.Tests/Blazor/BlazorRunOptions.cs
@@ -2,6 +2,7 @@
// The .NET Foundation licenses this file to you under the MIT license.
using System;
+using System.Collections.Generic;
using System.Threading.Tasks;
using Microsoft.Playwright;
@@ -13,11 +14,15 @@ public record BlazorRunOptions
BlazorRunHost Host = BlazorRunHost.DotnetRun,
bool DetectRuntimeFailures = true,
bool CheckCounter = true,
+ Dictionary<string, string>? ServerEnvironment = null,
Func<IPage, Task>? Test = null,
- Action<IConsoleMessage>? OnConsoleMessage = null,
+ Action<IPage>? OnPageLoaded = null,
+ Action<IPage, IConsoleMessage>? OnConsoleMessage = null,
+ Action<string>? OnServerMessage = null,
Action<string>? OnErrorMessage = null,
string Config = "Debug",
string? ExtraArgs = null,
+ string BrowserPath = "",
string QueryString = ""
);
diff --git a/src/mono/wasm/Wasm.Build.Tests/Blazor/BlazorWasmTestBase.cs b/src/mono/wasm/Wasm.Build.Tests/Blazor/BlazorWasmTestBase.cs
index ccea19a5b497..64bbe81cc7d4 100644
--- a/src/mono/wasm/Wasm.Build.Tests/Blazor/BlazorWasmTestBase.cs
+++ b/src/mono/wasm/Wasm.Build.Tests/Blazor/BlazorWasmTestBase.cs
@@ -191,11 +191,22 @@ public abstract class BlazorWasmTestBase : WasmTemplateTestBase
{
if (!string.IsNullOrEmpty(runOptions.ExtraArgs))
runArgs += $" {runOptions.ExtraArgs}";
+
+ runOptions.ServerEnvironment?.ToList().ForEach(
+ kv => s_buildEnv.EnvVars[kv.Key] = kv.Value);
+
using var runCommand = new RunCommand(s_buildEnv, _testOutput)
.WithWorkingDirectory(workingDirectory);
await using var runner = new BrowserRunner(_testOutput);
- var page = await runner.RunAsync(runCommand, runArgs, onConsoleMessage: OnConsoleMessage, onError: OnErrorMessage, modifyBrowserUrl: browserUrl => browserUrl + runOptions.QueryString);
+ var page = await runner.RunAsync(
+ runCommand,
+ runArgs,
+ onPageLoaded: runOptions.OnPageLoaded,
+ onConsoleMessage: OnConsoleMessage,
+ onServerMessage: runOptions.OnServerMessage,
+ onError: OnErrorMessage,
+ modifyBrowserUrl: browserUrl => browserUrl + runOptions.BrowserPath + runOptions.QueryString);
_testOutput.WriteLine("Waiting for page to load");
await page.WaitForLoadStateAsync(LoadState.DOMContentLoaded, new () { Timeout = 1 * 60 * 1000 });
@@ -217,11 +228,11 @@ public abstract class BlazorWasmTestBase : WasmTemplateTestBase
_testOutput.WriteLine($"Waiting for additional 10secs to see if any errors are reported");
await Task.Delay(10_000);
- void OnConsoleMessage(IConsoleMessage msg)
+ void OnConsoleMessage(IPage page, IConsoleMessage msg)
{
_testOutput.WriteLine($"[{msg.Type}] {msg.Text}");
- runOptions.OnConsoleMessage?.Invoke(msg);
+ runOptions.OnConsoleMessage?.Invoke(page, msg);
if (runOptions.DetectRuntimeFailures)
{
@@ -237,6 +248,12 @@ public abstract class BlazorWasmTestBase : WasmTemplateTestBase
}
}
- public string FindBlazorBinFrameworkDir(string config, bool forPublish, string framework = DefaultTargetFrameworkForBlazor)
- => _provider.FindBinFrameworkDir(config: config, forPublish: forPublish, framework: framework);
+ public string FindBlazorBinFrameworkDir(string config, bool forPublish, string framework = DefaultTargetFrameworkForBlazor, string? projectDir = null)
+ => _provider.FindBinFrameworkDir(config: config, forPublish: forPublish, framework: framework, projectDir: projectDir);
+
+ public string FindBlazorHostedBinFrameworkDir(string config, bool forPublish, string clientDirRelativeToProjectDir, string framework = DefaultTargetFrameworkForBlazor)
+ {
+ string? clientProjectDir = _projectDir == null ? null : Path.Combine(_projectDir, clientDirRelativeToProjectDir);
+ return _provider.FindBinFrameworkDir(config: config, forPublish: forPublish, framework: framework, projectDir: clientProjectDir);
+ }
}
diff --git a/src/mono/wasm/Wasm.Build.Tests/Blazor/SimpleMultiThreadedTests.cs b/src/mono/wasm/Wasm.Build.Tests/Blazor/SimpleMultiThreadedTests.cs
index bdee6a275a0e..c92fc5f35bf9 100644
--- a/src/mono/wasm/Wasm.Build.Tests/Blazor/SimpleMultiThreadedTests.cs
+++ b/src/mono/wasm/Wasm.Build.Tests/Blazor/SimpleMultiThreadedTests.cs
@@ -73,7 +73,7 @@ public class SimpleMultiThreadedTests : BlazorWasmTestBase
runOptions: new BlazorRunOptions(
Config: config,
ExtraArgs: "--web-server-use-cors --web-server-use-cop",
- OnConsoleMessage: (message) =>
+ OnConsoleMessage: (_, message) =>
{
if (message.Text.Contains("WasmEnableThreads=true"))
hasEmittedWasmEnableThreads = true;
diff --git a/src/mono/wasm/Wasm.Build.Tests/Blazor/WorkloadRequiredTests.cs b/src/mono/wasm/Wasm.Build.Tests/Blazor/WorkloadRequiredTests.cs
index deb8ad5def8e..0fd604cfea6e 100644
--- a/src/mono/wasm/Wasm.Build.Tests/Blazor/WorkloadRequiredTests.cs
+++ b/src/mono/wasm/Wasm.Build.Tests/Blazor/WorkloadRequiredTests.cs
@@ -120,7 +120,7 @@ public class WorkloadRequiredTests : BlazorWasmTestBase
{
Config = config,
Host = publish ? BlazorRunHost.WebServer : BlazorRunHost.DotnetRun,
- OnConsoleMessage = msg =>
+ OnConsoleMessage = (_, msg) =>
{
sbOutput.AppendLine(msg.Text);
}
diff --git a/src/mono/wasm/Wasm.Build.Tests/BrowserRunner.cs b/src/mono/wasm/Wasm.Build.Tests/BrowserRunner.cs
index 4aad869d9c17..997e2dc96b51 100644
--- a/src/mono/wasm/Wasm.Build.Tests/BrowserRunner.cs
+++ b/src/mono/wasm/Wasm.Build.Tests/BrowserRunner.cs
@@ -36,7 +36,8 @@ internal class BrowserRunner : IAsyncDisposable
public async Task<string> StartServerAndGetUrlAsync(
ToolCommand cmd,
- string args
+ string args,
+ Action<string>? onServerMessage = null
) {
TaskCompletionSource<string> urlAvailable = new();
Action<string?> outputHandler = msg =>
@@ -44,8 +45,12 @@ internal class BrowserRunner : IAsyncDisposable
if (string.IsNullOrEmpty(msg))
return;
+ onServerMessage?.Invoke(msg);
+
lock (OutputLines)
+ {
OutputLines.Add(msg);
+ }
Match m = s_appHostUrlRegex.Match(msg);
if (!m.Success)
@@ -91,7 +96,8 @@ internal class BrowserRunner : IAsyncDisposable
) {
var url = new Uri(browserUrl);
Playwright = await Microsoft.Playwright.Playwright.CreateAsync();
- string[] chromeArgs = new[] { $"--explicitly-allowed-ports={url.Port}" };
+ // codespaces: ignore certificate errors, otherwise Playwright fails with net::ERR_CERT_AUTHORITY_INVALID
+ string[] chromeArgs = new[] { $"--explicitly-allowed-ports={url.Port}", "--ignore-certificate-errors" };
_testOutput.WriteLine($"Launching chrome ('{s_chromePath.Value}') via playwright with args = {string.Join(',', chromeArgs)}");
return Browser = await Playwright.Chromium.LaunchAsync(new BrowserTypeLaunchOptions{
ExecutablePath = s_chromePath.Value,
@@ -105,21 +111,24 @@ internal class BrowserRunner : IAsyncDisposable
ToolCommand cmd,
string args,
bool headless = true,
- Action<IConsoleMessage>? onConsoleMessage = null,
+ Action<IPage, IConsoleMessage>? onConsoleMessage = null,
+ Action<IPage>? onPageLoaded = null,
+ Action<string>? onServerMessage = null,
Action<string>? onError = null,
Func<string, string>? modifyBrowserUrl = null)
{
- var urlString = await StartServerAndGetUrlAsync(cmd, args);
+ var urlString = await StartServerAndGetUrlAsync(cmd, args, onServerMessage);
var browser = await SpawnBrowserAsync(urlString, headless);
var context = await browser.NewContextAsync();
- return await RunAsync(context, urlString, headless, onConsoleMessage, onError, modifyBrowserUrl);
+ return await RunAsync(context, urlString, headless, onPageLoaded, onConsoleMessage, onError, modifyBrowserUrl);
}
public async Task<IPage> RunAsync(
IBrowserContext context,
string browserUrl,
bool headless = true,
- Action<IConsoleMessage>? onConsoleMessage = null,
+ Action<IPage>? onPageLoaded = null,
+ Action<IPage, IConsoleMessage>? onConsoleMessage = null,
Action<string>? onError = null,
Func<string, string>? modifyBrowserUrl = null,
bool resetExitedState = false
@@ -131,8 +140,11 @@ internal class BrowserRunner : IAsyncDisposable
browserUrl = modifyBrowserUrl(browserUrl);
IPage page = await context.NewPageAsync();
+ if (onPageLoaded is not null)
+ page.Load += (_, _) => onPageLoaded(page);
+
if (onConsoleMessage is not null)
- page.Console += (_, msg) => onConsoleMessage(msg);
+ page.Console += (_, msg) => onConsoleMessage(page, msg);
onError ??= _testOutput.WriteLine;
if (onError is not null)
diff --git a/src/mono/wasm/Wasm.Build.Tests/Common/TestOutputWrapper.cs b/src/mono/wasm/Wasm.Build.Tests/Common/TestOutputWrapper.cs
index a28657fa7bf0..03bef6c6ccb3 100644
--- a/src/mono/wasm/Wasm.Build.Tests/Common/TestOutputWrapper.cs
+++ b/src/mono/wasm/Wasm.Build.Tests/Common/TestOutputWrapper.cs
@@ -2,6 +2,7 @@
// The .NET Foundation licenses this file to you under the MIT license.
using System;
+using System.Text;
using Xunit.Abstractions;
#nullable enable
@@ -10,9 +11,12 @@ namespace Wasm.Build.Tests;
public class TestOutputWrapper(ITestOutputHelper baseOutput) : ITestOutputHelper
{
+ private readonly StringBuilder _outputBuffer = new StringBuilder();
+
public void WriteLine(string message)
{
baseOutput.WriteLine(message);
+ _outputBuffer.AppendLine(message);
if (EnvironmentVariables.ShowBuildOutput)
Console.WriteLine(message);
}
@@ -20,7 +24,10 @@ public class TestOutputWrapper(ITestOutputHelper baseOutput) : ITestOutputHelper
public void WriteLine(string format, params object[] args)
{
baseOutput.WriteLine(format, args);
+ _outputBuffer.AppendFormat(format, args).AppendLine();
if (EnvironmentVariables.ShowBuildOutput)
Console.WriteLine(format, args);
}
+
+ public override string ToString() => _outputBuffer.ToString();
}
diff --git a/src/mono/wasm/Wasm.Build.Tests/ProjectProviderBase.cs b/src/mono/wasm/Wasm.Build.Tests/ProjectProviderBase.cs
index 81c6a4894f12..581a187270ae 100644
--- a/src/mono/wasm/Wasm.Build.Tests/ProjectProviderBase.cs
+++ b/src/mono/wasm/Wasm.Build.Tests/ProjectProviderBase.cs
@@ -490,10 +490,10 @@ public abstract class ProjectProviderBase(ITestOutputHelper _testOutput, string?
Assert.Equal(expected, actualFileNames);
}
- public virtual string FindBinFrameworkDir(string config, bool forPublish, string framework, string? bundleDirName = null)
+ public virtual string FindBinFrameworkDir(string config, bool forPublish, string framework, string? bundleDirName = null, string? projectDir = null)
{
EnsureProjectDirIsSet();
- string basePath = Path.Combine(ProjectDir!, "bin", config, framework);
+ string basePath = Path.Combine(projectDir ?? ProjectDir!, "bin", config, framework);
if (forPublish)
basePath = FindSubDirIgnoringCase(basePath, "publish");
diff --git a/src/mono/wasm/Wasm.Build.Tests/TestAppScenarios/AppTestBase.cs b/src/mono/wasm/Wasm.Build.Tests/TestAppScenarios/AppTestBase.cs
index 771daff1f5d4..7647b1658620 100644
--- a/src/mono/wasm/Wasm.Build.Tests/TestAppScenarios/AppTestBase.cs
+++ b/src/mono/wasm/Wasm.Build.Tests/TestAppScenarios/AppTestBase.cs
@@ -31,14 +31,48 @@ public abstract class AppTestBase : BlazorWasmTestBase
LogPath = Path.Combine(s_buildEnv.LogRootPath, Id);
Utils.DirectoryCopy(Path.Combine(BuildEnvironment.TestAssetsPath, assetName), Path.Combine(_projectDir!));
- // WasmBasicTestApp consists of App + Library projects
- if (assetName == "WasmBasicTestApp")
- _projectDir = Path.Combine(_projectDir!, "App");
+ switch(assetName)
+ {
+ case "WasmBasicTestApp":
+ // WasmBasicTestApp consists of App + Library projects
+ _projectDir = Path.Combine(_projectDir!, "App");
+ break;
+ case "BlazorHostedApp":
+ // BlazorHostedApp consists of BlazorHosted.Client and BlazorHosted.Server projects
+ _projectDir = Path.Combine(_projectDir!, "BlazorHosted.Server");
+ break;
+ }
+ }
+
+ protected void BlazorHostedBuild(
+ string config,
+ string assetName,
+ string clientDirRelativeToProjectDir = "",
+ string? generatedProjectNamePrefix = null,
+ RuntimeVariant runtimeType = RuntimeVariant.SingleThreaded)
+ {
+ CopyTestAsset(assetName, generatedProjectNamePrefix);
+ string frameworkDir = FindBlazorHostedBinFrameworkDir(config,
+ forPublish: false,
+ clientDirRelativeToProjectDir: clientDirRelativeToProjectDir);
+ BuildProject(configuration: config,
+ binFrameworkDir: frameworkDir,
+ runtimeType: runtimeType);
}
- protected void BuildProject(string configuration, params string[] extraArgs)
+ protected void BuildProject(
+ string configuration,
+ string? binFrameworkDir = null,
+ RuntimeVariant runtimeType = RuntimeVariant.SingleThreaded,
+ bool assertAppBundle = true,
+ params string[] extraArgs)
{
- (CommandResult result, _) = BlazorBuild(new BlazorBuildOptions(Id, configuration), extraArgs);
+ (CommandResult result, _) = BlazorBuild(new BlazorBuildOptions(
+ Id: Id,
+ Config: configuration,
+ BinFrameworkDir: binFrameworkDir,
+ RuntimeType: runtimeType,
+ AssertAppBundle: assertAppBundle), extraArgs);
result.EnsureSuccessful();
}
@@ -54,37 +88,42 @@ public abstract class AppTestBase : BlazorWasmTestBase
protected Task<RunResult> RunSdkStyleAppForBuild(RunOptions options)
=> RunSdkStyleApp(options, BlazorRunHost.DotnetRun);
-
+
protected Task<RunResult> RunSdkStyleAppForPublish(RunOptions options)
=> RunSdkStyleApp(options, BlazorRunHost.WebServer);
private async Task<RunResult> RunSdkStyleApp(RunOptions options, BlazorRunHost host = BlazorRunHost.DotnetRun)
{
- string queryString = "?test=" + options.TestScenario;
- if (options.BrowserQueryString != null)
- queryString += "&" + string.Join("&", options.BrowserQueryString.Select(kvp => $"{kvp.Key}={kvp.Value}"));
+ var query = options.BrowserQueryString ?? new Dictionary<string, string>();
+ if (!string.IsNullOrEmpty(options.TestScenario))
+ query.Add("test", options.TestScenario);
+
+ var queryString = query.Any() ? "?" + string.Join("&", query.Select(kvp => $"{kvp.Key}={kvp.Value}")) : "";
var tcs = new TaskCompletionSource<int>();
List<string> testOutput = new();
List<string> consoleOutput = new();
- Regex exitRegex = new Regex("WASM EXIT (?<exitCode>[0-9]+)$");
+ List<string> serverOutput = new();
+ Regex exitRegex = new Regex("(WASM EXIT (?<exitCode>[0-9]+)$)|(Program terminated with exit\\((?<exitCode>[0-9]+)\\))");
BlazorRunOptions blazorRunOptions = new(
CheckCounter: false,
Config: options.Configuration,
+ ServerEnvironment: options.ServerEnvironment,
+ OnPageLoaded: options.OnPageLoaded,
OnConsoleMessage: OnConsoleMessage,
+ OnServerMessage: OnServerMessage,
+ BrowserPath: options.BrowserPath,
QueryString: queryString,
Host: host);
await BlazorRunTest(blazorRunOptions);
- void OnConsoleMessage(IConsoleMessage msg)
+ void OnConsoleMessage(IPage page, IConsoleMessage msg)
{
consoleOutput.Add(msg.Text);
- const string testOutputPrefix = "TestOutput -> ";
- if (msg.Text.StartsWith(testOutputPrefix))
- testOutput.Add(msg.Text.Substring(testOutputPrefix.Length));
+ OnTestOutput(msg.Text);
var exitMatch = exitRegex.Match(msg.Text);
if (exitMatch.Success)
@@ -94,7 +133,23 @@ public abstract class AppTestBase : BlazorWasmTestBase
throw new Exception(msg.Text);
if (options.OnConsoleMessage != null)
- options.OnConsoleMessage(msg);
+ options.OnConsoleMessage(page, msg);
+ }
+
+ void OnServerMessage(string msg)
+ {
+ serverOutput.Add(msg);
+ OnTestOutput(msg);
+
+ if (options.OnServerMessage != null)
+ options.OnServerMessage(msg);
+ }
+
+ void OnTestOutput(string msg)
+ {
+ const string testOutputPrefix = "TestOutput -> ";
+ if (msg.StartsWith(testOutputPrefix))
+ testOutput.Add(msg.Substring(testOutputPrefix.Length));
}
//TimeSpan timeout = TimeSpan.FromMinutes(2);
@@ -106,20 +161,25 @@ public abstract class AppTestBase : BlazorWasmTestBase
if (options.ExpectedExitCode != null && wasmExitCode != options.ExpectedExitCode)
throw new Exception($"Expected exit code {options.ExpectedExitCode} but got {wasmExitCode}");
- return new(wasmExitCode, testOutput, consoleOutput);
+ return new(wasmExitCode, testOutput, consoleOutput, serverOutput);
}
protected record RunOptions(
string Configuration,
- string TestScenario,
+ string BrowserPath = "",
+ string? TestScenario = null,
Dictionary<string, string> BrowserQueryString = null,
- Action<IConsoleMessage> OnConsoleMessage = null,
+ Dictionary<string, string> ServerEnvironment = null,
+ Action<IPage> OnPageLoaded = null,
+ Action<IPage, IConsoleMessage> OnConsoleMessage = null,
+ Action<string> OnServerMessage = null,
int? ExpectedExitCode = 0
);
protected record RunResult(
int ExitCode,
IReadOnlyCollection<string> TestOutput,
- IReadOnlyCollection<string> ConsoleOutput
+ IReadOnlyCollection<string> ConsoleOutput,
+ IReadOnlyCollection<string> ServerOutput
);
}
diff --git a/src/mono/wasm/Wasm.Build.Tests/TestAppScenarios/DebugLevelTests.cs b/src/mono/wasm/Wasm.Build.Tests/TestAppScenarios/DebugLevelTests.cs
index 3dfe4c467f79..1bbe8691d80d 100644
--- a/src/mono/wasm/Wasm.Build.Tests/TestAppScenarios/DebugLevelTests.cs
+++ b/src/mono/wasm/Wasm.Build.Tests/TestAppScenarios/DebugLevelTests.cs
@@ -20,7 +20,7 @@ public class DebugLevelTests : AppTestBase
{
}
- private void AssertDebugLevel(RunResult result, int value)
+ private void AssertDebugLevel(RunResult result, int value)
{
Assert.Collection(
result.TestOutput,
@@ -51,7 +51,7 @@ public class DebugLevelTests : AppTestBase
public async Task BuildWithExplicitValue(string configuration, int debugLevel)
{
CopyTestAsset("WasmBasicTestApp", $"DebugLevelTests_BuildWithExplicitValue_{configuration}");
- BuildProject(configuration, $"-p:WasmDebugLevel={debugLevel}");
+ BuildProject(configuration: configuration, extraArgs: $"-p:WasmDebugLevel={debugLevel}");
var result = await RunSdkStyleAppForBuild(new(
Configuration: configuration,
diff --git a/src/mono/wasm/Wasm.Build.Tests/TestAppScenarios/SignalRClientTests.cs b/src/mono/wasm/Wasm.Build.Tests/TestAppScenarios/SignalRClientTests.cs
new file mode 100644
index 000000000000..b1e46d511006
--- /dev/null
+++ b/src/mono/wasm/Wasm.Build.Tests/TestAppScenarios/SignalRClientTests.cs
@@ -0,0 +1,84 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+using System.Text.RegularExpressions;
+using System.Threading.Tasks;
+using Microsoft.Playwright;
+using Xunit.Abstractions;
+using Xunit;
+
+#nullable enable
+
+namespace Wasm.Build.Tests.TestAppScenarios;
+
+public class SignalRClientTests : AppTestBase
+{
+ public SignalRClientTests(ITestOutputHelper output, SharedBuildPerTestClassFixture buildContext)
+ : base(output, buildContext)
+ {
+ }
+
+ [ConditionalTheory(typeof(BuildTestBase), nameof(IsWorkloadWithMultiThreadingForDefaultFramework))]
+ [InlineData("Debug", "LongPolling")]
+ [InlineData("Release", "LongPolling")]
+ [InlineData("Debug", "WebSockets")]
+ [InlineData("Release", "WebSockets")]
+ public async Task SignalRPassMessages(string config, string transport)
+ {
+ BlazorHostedBuild(config,
+ assetName: "BlazorHostedApp",
+ clientDirRelativeToProjectDir: "../BlazorHosted.Client",
+ generatedProjectNamePrefix: "SignalRClientTests",
+ runtimeType: RuntimeVariant.MultiThreaded);
+
+ List<string> consoleOutput = new();
+ List<string> serverOutput = new();
+
+ var result = await RunSdkStyleAppForBuild(new(
+ Configuration: config,
+ // We are using build (not publish),
+ // we need to instruct static web assets to use manifest file,
+ // because wwwroot in bin doesn't contain all files (for build)
+ ServerEnvironment: new Dictionary<string, string> { ["ASPNETCORE_ENVIRONMENT"] = "Development" },
+ BrowserPath: "/chat",
+ BrowserQueryString: new Dictionary<string, string> { ["transport"] = transport, ["message"] = "ping" },
+ OnPageLoaded: async page => await page.ClickAsync("button#connectButton"),
+ OnServerMessage: (msg) => { serverOutput.Add(msg); },
+ OnConsoleMessage: async (page, msg) =>
+ {
+ consoleOutput.Add(msg.Text);
+ if (msg.Text.Contains("TestOutput ->"))
+ _testOutput.WriteLine(msg.Text);
+
+ if (msg.Text.Contains("SignalR connected"))
+ await page.ClickAsync("button#subscribeButton");
+
+ if (msg.Text.Contains("Subscribed to ReceiveMessage"))
+ await page.ClickAsync("button#sendMessageButton");
+
+ if (msg.Text.Contains("ReceiveMessage from server"))
+ await page.ClickAsync("button#exitProgramButton");
+ }
+ ));
+
+ string output = _testOutput.ToString() ?? "";
+ Assert.NotEmpty(output);
+ // check sending and receiving threadId
+ string threadIdUsedForSending = GetThreadOfAction(output, @"SignalRPassMessages was sent by CurrentManagedThreadId=(\d+)", "signalR message was sent");
+ string threadIdUsedForReceiving = GetThreadOfAction(output, @"ReceiveMessage from server on CurrentManagedThreadId=(\d+)", "signalR message was received");
+ Assert.True("1" != threadIdUsedForSending || "1" != threadIdUsedForReceiving,
+ $"Expected to send/receive with signalR in non-UI threads, instead only CurrentManagedThreadId=1 was used. TestOutput: {output}.");
+ }
+
+ private string GetThreadOfAction(string testOutput, string pattern, string actionDescription)
+ {
+ Match match = Regex.Match(testOutput, pattern);
+ Assert.True(match.Success, $"Expected to find a log that {actionDescription}. TestOutput: {testOutput}.");
+ return match.Groups[1].Value ?? "";
+ }
+}
diff --git a/src/mono/wasm/Wasm.Build.Tests/TestMainJsProjectProvider.cs b/src/mono/wasm/Wasm.Build.Tests/TestMainJsProjectProvider.cs
index 7042c4855549..89a1ddc8c132 100644
--- a/src/mono/wasm/Wasm.Build.Tests/TestMainJsProjectProvider.cs
+++ b/src/mono/wasm/Wasm.Build.Tests/TestMainJsProjectProvider.cs
@@ -108,9 +108,9 @@ public class TestMainJsProjectProvider : ProjectProviderBase
AssertBundle(assertOptions);
}
- public override string FindBinFrameworkDir(string config, bool forPublish, string framework, string? bundleDirName = null)
+ public override string FindBinFrameworkDir(string config, bool forPublish, string framework, string? bundleDirName = null, string? projectDir = null)
{
EnsureProjectDirIsSet();
- return Path.Combine(ProjectDir!, "bin", config, framework, "browser-wasm", bundleDirName ?? this.BundleDirName, "_framework");
+ return Path.Combine(projectDir ?? ProjectDir!, "bin", config, framework, "browser-wasm", bundleDirName ?? this.BundleDirName, "_framework");
}
}
diff --git a/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/App.razor b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/App.razor
new file mode 100644
index 000000000000..6fd3ed1b5a3b
--- /dev/null
+++ b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/App.razor
@@ -0,0 +1,12 @@
+<Router AppAssembly="@typeof(App).Assembly">
+ <Found Context="routeData">
+ <RouteView RouteData="@routeData" DefaultLayout="@typeof(MainLayout)" />
+ <FocusOnNavigate RouteData="@routeData" Selector="h1" />
+ </Found>
+ <NotFound>
+ <PageTitle>Not found</PageTitle>
+ <LayoutView Layout="@typeof(MainLayout)">
+ <p role="alert">Sorry, there's nothing at this address.</p>
+ </LayoutView>
+ </NotFound>
+</Router>
diff --git a/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/BlazorHosted.Client.csproj b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/BlazorHosted.Client.csproj
new file mode 100644
index 000000000000..314f3453a08e
--- /dev/null
+++ b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/BlazorHosted.Client.csproj
@@ -0,0 +1,19 @@
+<Project Sdk="Microsoft.NET.Sdk.BlazorWebAssembly">
+
+ <PropertyGroup>
+ <TargetFramework>net9.0</TargetFramework>
+ <Nullable>enable</Nullable>
+ <ImplicitUsings>enable</ImplicitUsings>
+ <WasmEnableThreads>true</WasmEnableThreads>
+ <!-- nullability warning, async warning -->
+ <NoWarn>CS8604;CS4014</NoWarn>
+ </PropertyGroup>
+
+ <!-- versions are pinned, but when run at the WBT level the in-tree runtime is used -->
+ <ItemGroup>
+ <PackageReference Include="Microsoft.AspNetCore.Components.WebAssembly" Version="9.0.0-preview.1.24081.5" />
+ <PackageReference Include="Microsoft.AspNetCore.Components.WebAssembly.DevServer" Version="9.0.0-preview.1.24081.5" PrivateAssets="all" />
+ <PackageReference Include="Microsoft.AspNetCore.SignalR.Client" Version="8.0.1" />
+ <PackageReference Include="System.Net.Http.Json" Version="8.0.0" />
+ </ItemGroup>
+</Project>
diff --git a/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/Helper.cs b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/Helper.cs
new file mode 100644
index 000000000000..38ead1438099
--- /dev/null
+++ b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/Helper.cs
@@ -0,0 +1,42 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Collections.Specialized;
+using Microsoft.AspNetCore.Http.Connections;
+
+namespace BlazorHosted.Client;
+
+public static class Helper
+{
+ public static string GetValue(NameValueCollection parameters, string key)
+ {
+ var values = parameters.GetValues(key);
+ if (values == null || values.Length == 0)
+ {
+ throw new Exception($"Parameter '{key}' is required in the query string");
+ }
+ if (values.Length > 1)
+ {
+ throw new Exception($"Parameter '{key}' should be unique in the query string");
+ }
+ return values[0];
+ }
+
+ public static HttpTransportType StringToTransportType(string transport)
+ {
+ switch (transport.ToLowerInvariant())
+ {
+ case "longpolling":
+ return HttpTransportType.LongPolling;
+ case "websockets":
+ return HttpTransportType.WebSockets;
+ default:
+ throw new Exception($"{transport} is an invalid transport type");
+ }
+ }
+
+ public static void TestOutputWriteLine(string message)
+ {
+ Console.WriteLine("TestOutput -> " + message);
+ }
+}
diff --git a/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/Layout/MainLayout.razor b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/Layout/MainLayout.razor
new file mode 100644
index 000000000000..f4d8cbb8e66e
--- /dev/null
+++ b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/Layout/MainLayout.razor
@@ -0,0 +1,13 @@
+@inherits LayoutComponentBase
+
+<div class="page">
+ <main>
+ <div class="top-row px-4">
+ <a href="https://learn.microsoft.com/aspnet/core/" target="_blank">About</a>
+ </div>
+
+ <article class="content px-4">
+ @Body
+ </article>
+ </main>
+</div>
diff --git a/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/Pages/Chat.razor b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/Pages/Chat.razor
new file mode 100644
index 000000000000..d8908fd4a9f9
--- /dev/null
+++ b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/Pages/Chat.razor
@@ -0,0 +1,105 @@
+@page "/chat"
+@using Microsoft.AspNetCore.SignalR
+@using Microsoft.AspNetCore.SignalR.Client
+@using Microsoft.AspNetCore.Http.Connections;
+@using System.Web;
+@inject NavigationManager NavigationManager
+@inject IJSRuntime JSRuntime
+
+<h1>Chat Room</h1>
+<button id="connectButton" @onclick="Connect">Connect SignalR</button>
+<button id="subscribeButton" @onclick="Subscribe">Subscribe to server messages</button>
+<button id="sendMessageButton" @onclick="SignalRPassMessages">Send message from query</button>
+<button id="exitProgramButton" @onclick="SendExitSignal">Send exit signal to server</button>
+<button id="disconnectButton" @onclick="DisposeHubConnection">Disconnect SignalR</button>
+<div>
+ @foreach (var chatMessage in chatMessages)
+ {
+ <p>@chatMessage</p>
+ }
+</div>
+
+@code {
+ private string _hubUrl = string.Empty;
+ private HubConnection? _hubConnection;
+ private string message = string.Empty;
+ private string transport = string.Empty;
+ private List<string> chatMessages = new List<string>();
+ private string wrongQueryError = "Query string parameters 'message' and 'transport' are required";
+
+ protected override void OnInitialized()
+ {
+ Helper.TestOutputWriteLine($"OnInitialized on CurrentManagedThreadId={Environment.CurrentManagedThreadId}");
+ GetQueryParameters();
+ }
+
+ private void GetQueryParameters()
+ {
+ var uri = new Uri(NavigationManager.Uri);
+ if (string.IsNullOrEmpty(uri.Query))
+ {
+ throw new Exception(wrongQueryError);
+ }
+ var parameters = HttpUtility.ParseQueryString(uri.Query);
+ if (parameters == null)
+ {
+ throw new Exception(wrongQueryError);
+ }
+ transport = Helper.GetValue(parameters, "transport");
+ message = $"{transport} {Helper.GetValue(parameters, "message")}";
+ Helper.TestOutputWriteLine($"GetQueryParameters on CurrentManagedThreadId={Environment.CurrentManagedThreadId} finished");
+ }
+
+ private async Task Connect()
+ {
+ _hubUrl = NavigationManager.BaseUri + "chathub";
+ HttpTransportType httpTransportType = Helper.StringToTransportType(transport);
+ _hubConnection = new HubConnectionBuilder()
+ .WithUrl(_hubUrl, options =>
+ {
+ options.Transports = httpTransportType;
+ })
+ .Build();
+
+ await _hubConnection.StartAsync();
+ Helper.TestOutputWriteLine($"SignalR connected by CurrentManagedThreadId={Environment.CurrentManagedThreadId}");
+ }
+
+ private void Subscribe()
+ {
+ _hubConnection.On<string>("ReceiveMessage", (message) =>
+ {
+ Helper.TestOutputWriteLine($"Message = [{message}]. ReceiveMessage from server on CurrentManagedThreadId={Environment.CurrentManagedThreadId}");
+ chatMessages.Add(message);
+ });
+ Helper.TestOutputWriteLine($"Subscribed to ReceiveMessage by CurrentManagedThreadId={Environment.CurrentManagedThreadId}");
+ }
+
+ private async Task SignalRPassMessages() =>
+ await Task.Run(async () =>
+ {
+ await _hubConnection.SendAsync( "SendMessage", message, Environment.CurrentManagedThreadId);
+ Helper.TestOutputWriteLine($"SignalRPassMessages was sent by CurrentManagedThreadId={Environment.CurrentManagedThreadId}");
+ });
+
+ private async Task SendExitSignal()
+ {
+ // exit the server
+ await _hubConnection.SendAsync("Exit", 0);
+ await DisposeHubConnection();
+ Helper.TestOutputWriteLine($"Exit signal was sent by CurrentManagedThreadId={Environment.CurrentManagedThreadId}");
+ // exit the client
+ await JSRuntime.InvokeVoidAsync("eval", "import('./dotnet.js').then(module => { module.dotnet; module.exit(0); });");
+ }
+
+ private async Task DisposeHubConnection()
+ {
+ if (_hubConnection != null)
+ {
+ _hubConnection.Remove("ReceiveMessage");
+ await _hubConnection.DisposeAsync();
+ _hubConnection = null;
+ }
+ Helper.TestOutputWriteLine($"SignalR disconnected by CurrentManagedThreadId={Environment.CurrentManagedThreadId}");
+ }
+}
diff --git a/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/Program.cs b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/Program.cs
new file mode 100644
index 000000000000..67a2fb06d6a1
--- /dev/null
+++ b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/Program.cs
@@ -0,0 +1,13 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using BlazorHosted.Client;
+using Microsoft.AspNetCore.Components.Web;
+using Microsoft.AspNetCore.Components.WebAssembly.Hosting;
+
+var builder = WebAssemblyHostBuilder.CreateDefault(args);
+builder.RootComponents.Add<App>("#app");
+builder.RootComponents.Add<HeadOutlet>("head::after");
+builder.Services.AddScoped(sp => new HttpClient { BaseAddress = new Uri(builder.HostEnvironment.BaseAddress) });
+
+await builder.Build().RunAsync().ConfigureAwait(false);
diff --git a/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/_Imports.razor b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/_Imports.razor
new file mode 100644
index 000000000000..d39afd384f89
--- /dev/null
+++ b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/_Imports.razor
@@ -0,0 +1,6 @@
+@using Microsoft.AspNetCore.Components.Forms
+@using Microsoft.AspNetCore.Components.Routing
+@using Microsoft.AspNetCore.Components.Web
+@using Microsoft.JSInterop
+@using BlazorHosted.Client
+@using BlazorHosted.Client.Layout \ No newline at end of file
diff --git a/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/wwwroot/favicon.ico b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/wwwroot/favicon.ico
new file mode 100644
index 000000000000..63e859b476ef
--- /dev/null
+++ b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/wwwroot/favicon.ico
Binary files differ
diff --git a/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/wwwroot/index.html b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/wwwroot/index.html
new file mode 100644
index 000000000000..6ab069a76646
--- /dev/null
+++ b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Client/wwwroot/index.html
@@ -0,0 +1,22 @@
+<!DOCTYPE html>
+<html lang="en">
+
+<head>
+ <meta charset="utf-8" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no" />
+ <title>BlazorHosted</title>
+ <base href="/" />
+</head>
+
+<body>
+ <div id="app">Loading...</div>
+
+ <div id="blazor-error-ui">
+ An unhandled error has occurred.
+ <a href="" class="reload">Reload</a>
+ <a class="dismiss">🗙</a>
+ </div>
+ <script src="_framework/blazor.webassembly.js"></script>
+</body>
+
+</html>
diff --git a/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Server/BlazorHosted.Server.csproj b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Server/BlazorHosted.Server.csproj
new file mode 100644
index 000000000000..cc3ac1aae891
--- /dev/null
+++ b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Server/BlazorHosted.Server.csproj
@@ -0,0 +1,19 @@
+<Project Sdk="Microsoft.NET.Sdk.Web">
+
+ <PropertyGroup>
+ <TargetFramework>net9.0</TargetFramework>
+ <Nullable>enable</Nullable>
+ <ImplicitUsings>enable</ImplicitUsings>
+ <!-- configure await warnings -->
+ <NoWarn>CA2007</NoWarn>
+ </PropertyGroup>
+
+ <ItemGroup>
+ <PackageReference Include="Microsoft.AspNetCore.Components.WebAssembly.Server" Version="9.0.0-preview.1.24081.5" />
+ </ItemGroup>
+
+ <ItemGroup>
+ <ProjectReference Include="..\BlazorHosted.Client\BlazorHosted.Client.csproj" />
+ </ItemGroup>
+
+</Project>
diff --git a/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Server/ChatHub.cs b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Server/ChatHub.cs
new file mode 100644
index 000000000000..39a97cc4a09e
--- /dev/null
+++ b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Server/ChatHub.cs
@@ -0,0 +1,21 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using Microsoft.AspNetCore.SignalR;
+
+namespace BlazorHosted.Server.Hubs;
+public class ChatHub : Hub
+{
+ public async Task SendMessage(string message, int sendingThreadId)
+ {
+ Console.WriteLine($"Server: receives Message=[{message}] sent by threadID = {sendingThreadId} and sends it back.");
+ string changedMessage = $"{message}-pong";
+ await Clients.All.SendAsync("ReceiveMessage", changedMessage).ConfigureAwait(false);
+ }
+
+ public void Exit(int code)
+ {
+ Console.WriteLine($"Received exit code {code} from client.");
+ Environment.Exit(code);
+ }
+}
diff --git a/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Server/Program.cs b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Server/Program.cs
new file mode 100644
index 000000000000..fea18a9250cc
--- /dev/null
+++ b/src/mono/wasm/testassets/BlazorHostedApp/BlazorHosted.Server/Program.cs
@@ -0,0 +1,52 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using Microsoft.Extensions.Configuration;
+using System;
+using Microsoft.Extensions.Logging;
+using BlazorHosted.Server.Hubs;
+
+var builder = WebApplication.CreateBuilder(args);
+
+builder.Services.AddControllersWithViews();
+builder.Services.AddRazorPages();
+builder.Services.AddSignalR(options =>
+{
+ options.KeepAliveInterval = TimeSpan.Zero; // minimize keep-alive messages
+});
+
+var app = builder.Build();
+
+// Configure the HTTP request pipeline.
+if (app.Environment.IsDevelopment())
+{
+ app.UseWebAssemblyDebugging();
+}
+else
+{
+ app.UseExceptionHandler("/Error");
+ // The default HSTS value is 30 days. You may want to change this for production scenarios, see https://aka.ms/aspnetcore-hsts.
+ app.UseHsts();
+}
+
+// Add headers to enable SharedArrayBuffer
+app.Use(async (context, next) =>
+{
+ var response = context.Response;
+ response.Headers.Append("Cross-Origin-Opener-Policy", "same-origin");
+ response.Headers.Append("Cross-Origin-Embedder-Policy", "require-corp");
+
+ await next();
+});
+app.UseBlazorFrameworkFiles();
+app.UseStaticFiles();
+
+app.UseRouting();
+
+app.MapRazorPages();
+app.MapControllers();
+app.MapFallbackToFile("index.html");
+
+app.MapHub<ChatHub>("/chathub");
+
+app.Run();
diff --git a/src/mono/wasm/testassets/WasmBasicTestApp/App/wwwroot/README.md b/src/mono/wasm/testassets/WasmBasicTestApp/App/wwwroot/README.md
new file mode 100644
index 000000000000..996992663189
--- /dev/null
+++ b/src/mono/wasm/testassets/WasmBasicTestApp/App/wwwroot/README.md
@@ -0,0 +1,15 @@
+## WasmBasicTestApp
+
+This is a test application used by various Wasm.Build.Tests. The idea is to share common behavior (so that we don't have to maintain many test apps) and tweak it per test case.
+It typically suits scenarios where you need more than a plain template app. If the test case is too different, feel free to create another app.
+
+### Usage
+
+The app reads the `test` query parameter and uses it to switch between test cases. The entry point is `main.js`.
+There is a common part, then a switch on the test case that modifies app startup; once the app starts, a second switch on the test case runs the actual test code.
+
+Some test cases pass additional parameters to differentiate behavior; see `src/mono/wasm/Wasm.Build.Tests/TestAppScenarios`.
+
+### Running outside of WBT
+
+One of the benefits is that you can copy the app out of the tree and run it with just `dotnet run`, without running Wasm.Build.Tests.
\ No newline at end of file
diff --git a/src/mono/wasm/testassets/WasmBasicTestApp/App/wwwroot/main.js b/src/mono/wasm/testassets/WasmBasicTestApp/App/wwwroot/main.js
index 2e1b7ff8c84f..3a01053875c0 100644
--- a/src/mono/wasm/testassets/WasmBasicTestApp/App/wwwroot/main.js
+++ b/src/mono/wasm/testassets/WasmBasicTestApp/App/wwwroot/main.js
@@ -33,6 +33,7 @@ switch (testCase) {
Math.floor(Math.random() * 5) + 5,
Math.floor(Math.random() * 5) + 10
];
+ console.log(`Failing test at assembly indexes [${failAtAssemblyNumbers.join(", ")}]`);
let alreadyFailed = [];
dotnet.withDiagnosticTracing(true).withResourceLoader((type, name, defaultUri, integrity, behavior) => {
if (type === "dotnetjs") {
@@ -45,8 +46,8 @@ switch (testCase) {
return defaultUri;
}
- assemblyCounter++;
- if (!failAtAssemblyNumbers.includes(assemblyCounter) || alreadyFailed.includes(defaultUri))
+ const currentCounter = assemblyCounter++;
+ if (!failAtAssemblyNumbers.includes(currentCounter) || alreadyFailed.includes(defaultUri))
return defaultUri;
alreadyFailed.push(defaultUri);
diff --git a/src/native/corehost/apphost/static/singlefilehost.def b/src/native/corehost/apphost/static/singlefilehost.def
index 6052b832b0b0..e1208056b832 100644
--- a/src/native/corehost/apphost/static/singlefilehost.def
+++ b/src/native/corehost/apphost/static/singlefilehost.def
@@ -13,5 +13,8 @@ CLRJitAttachState @3 data
; needed by SOS, WinDBG, and Watson. This must remain ordinal 4.
DotNetRuntimeInfo @4 data
+; DAC table export
+g_dacTable = s_dacGlobals
+
; Used by profilers
MetaDataGetDispenser
diff --git a/src/native/corehost/json_parser.h b/src/native/corehost/json_parser.h
index 2c2845aac46b..d7393b0ae678 100644
--- a/src/native/corehost/json_parser.h
+++ b/src/native/corehost/json_parser.h
@@ -8,12 +8,22 @@
// https://github.com/Tencent/rapidjson/issues/1596#issuecomment-548774663
#define RAPIDJSON_48BITPOINTER_OPTIMIZATION 0
+// see https://github.com/Tencent/rapidjson/issues/1448
+// windows.h is included here on purpose to surface the compile-time clash early: it
+// defines a GetObject macro that would otherwise collide with rapidjson's GetObject
+#ifdef _WIN32
+#define NOMINMAX
+#include <windows.h>
+#endif
+
#include "pal.h"
#include <external/rapidjson/document.h>
#include <external/rapidjson/fwd.h>
#include <vector>
#include "bundle/info.h"
+#undef GetObject
+
class json_parser_t {
public:
#ifdef _WIN32
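For context on the hunk above: `GetObject` is a Win32 macro (expanding to `GetObjectA`/`GetObjectW`), so the macro must be in the same state where rapidjson's `GetObject` members are declared and where they are called. Below is a minimal sketch of the discipline json_parser.h now enforces: include windows.h first, let rapidjson's own push_macro guard shield its declarations, then `#undef` for the rest of the translation unit. The consumer function is hypothetical, not part of the patch.

```cpp
// Sketch of the include-order discipline this header enforces (illustrative
// consumer code; first_member_is_string is a hypothetical helper).
#ifdef _WIN32
#define NOMINMAX
#include <windows.h>          // defines GetObject -> GetObjectA/GetObjectW
#endif

#include <external/rapidjson/document.h>

#undef GetObject              // from here on, GetObject means the rapidjson member

bool first_member_is_string(const rapidjson::Document& doc)
{
    // Without the #undef, this call would be macro-expanded to GetObjectA()
    // on Windows and fail to compile. The GetObj() alias added to document.h
    // in this update is the include-order-independent alternative.
    const auto obj = doc.GetObject();
    return obj.MemberCount() > 0 && obj.MemberBegin()->value.IsString();
}
```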
diff --git a/src/native/external/rapidjson-version.txt b/src/native/external/rapidjson-version.txt
index 0ccc08a3c223..b6f5f9532a7d 100644
--- a/src/native/external/rapidjson-version.txt
+++ b/src/native/external/rapidjson-version.txt
@@ -1,6 +1,6 @@
-d87b698d0fcc10a5f632ecbc80a9cb2a8fa094a5
+3f73edae00aba5b0112a80b4d41e6f1ff7d92a3d
-https://github.com/Tencent/rapidjson/commit/d87b698d0fcc10a5f632ecbc80a9cb2a8fa094a5
+https://github.com/Tencent/rapidjson/commit/3f73edae00aba5b0112a80b4d41e6f1ff7d92a3d
Note: This library is not using a proper release lifecycle. v1.1.0 was the last version released in 2016.
- Therefore, we are pointing to a random commit from 2019 rather than a version tag.
+ Therefore, we are pointing to a random commit from 2024 rather than a version tag.
diff --git a/src/native/external/rapidjson/README.TXT b/src/native/external/rapidjson/README.TXT
index bc0a70382f4a..9eff509a934d 100644
--- a/src/native/external/rapidjson/README.TXT
+++ b/src/native/external/rapidjson/README.TXT
@@ -1,2 +1,2 @@
-This directory contains the contents of `include/rapidjson` from
-<https://github.com/tencent/rapidjson>, commit hash d87b698d0fcc10.
+This directory contains selective files from
+https://github.com/Tencent/rapidjson/tree/3f73edae00aba5b0112a80b4d41e6f1ff7d92a3d/include/rapidjson
diff --git a/src/native/external/rapidjson/allocators.h b/src/native/external/rapidjson/allocators.h
index cc67c8971323..275417bd8b37 100644
--- a/src/native/external/rapidjson/allocators.h
+++ b/src/native/external/rapidjson/allocators.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
@@ -16,6 +16,14 @@
#define RAPIDJSON_ALLOCATORS_H_
#include "rapidjson.h"
+#include "internal/meta.h"
+
+#include <memory>
+#include <limits>
+
+#if RAPIDJSON_HAS_CXX11
+#include <type_traits>
+#endif
RAPIDJSON_NAMESPACE_BEGIN
@@ -77,19 +85,26 @@ public:
static const bool kNeedFree = true;
void* Malloc(size_t size) {
if (size) // behavior of malloc(0) is implementation defined.
- return std::malloc(size);
+ return RAPIDJSON_MALLOC(size);
else
return NULL; // standardize to returning NULL.
}
void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) {
(void)originalSize;
if (newSize == 0) {
- std::free(originalPtr);
+ RAPIDJSON_FREE(originalPtr);
return NULL;
}
- return std::realloc(originalPtr, newSize);
+ return RAPIDJSON_REALLOC(originalPtr, newSize);
+ }
+ static void Free(void *ptr) RAPIDJSON_NOEXCEPT { RAPIDJSON_FREE(ptr); }
+
+ bool operator==(const CrtAllocator&) const RAPIDJSON_NOEXCEPT {
+ return true;
+ }
+ bool operator!=(const CrtAllocator&) const RAPIDJSON_NOEXCEPT {
+ return false;
}
- static void Free(void *ptr) { std::free(ptr); }
};
///////////////////////////////////////////////////////////////////////////////
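CrtAllocator above now routes through `RAPIDJSON_MALLOC`/`RAPIDJSON_REALLOC`/`RAPIDJSON_FREE` instead of calling the std functions directly. In upstream rapidjson these are `#ifndef`-guarded macros defaulting to `std::malloc` and friends, so a consumer can redirect every CRT-level allocation. A hedged sketch, assuming the vendored rapidjson.h carries the same hooks (`counting_malloc` is a hypothetical wrapper):

```cpp
// Sketch: redirecting rapidjson's CRT-level allocations through a counting
// wrapper. The macros must be defined before any rapidjson header is included,
// since the header only defines them #ifndef.
#include <cstdio>
#include <cstdlib>

static void* counting_malloc(size_t n)
{
    std::printf("rapidjson alloc: %zu bytes\n", n);  // trace each allocation
    return std::malloc(n);
}

#define RAPIDJSON_MALLOC(size)           counting_malloc(size)
#define RAPIDJSON_REALLOC(ptr, new_size) std::realloc(ptr, new_size)
#define RAPIDJSON_FREE(ptr)              std::free(ptr)

#include <external/rapidjson/allocators.h>
// Every CrtAllocator::Malloc in the hunk above now funnels through
// counting_malloc.
```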
@@ -113,16 +128,64 @@ public:
*/
template <typename BaseAllocator = CrtAllocator>
class MemoryPoolAllocator {
+ //! Chunk header for prepending to each chunk.
+ /*! Chunks are stored as a singly linked list.
+ */
+ struct ChunkHeader {
+ size_t capacity; //!< Capacity of the chunk in bytes (excluding the header itself).
+ size_t size; //!< Current size of allocated memory in bytes.
+ ChunkHeader *next; //!< Next chunk in the linked list.
+ };
+
+ struct SharedData {
+ ChunkHeader *chunkHead; //!< Head of the chunk linked-list. Only the head chunk serves allocation.
+ BaseAllocator* ownBaseAllocator; //!< base allocator created by this object.
+ size_t refcount;
+ bool ownBuffer;
+ };
+
+ static const size_t SIZEOF_SHARED_DATA = RAPIDJSON_ALIGN(sizeof(SharedData));
+ static const size_t SIZEOF_CHUNK_HEADER = RAPIDJSON_ALIGN(sizeof(ChunkHeader));
+
+ static inline ChunkHeader *GetChunkHead(SharedData *shared)
+ {
+ return reinterpret_cast<ChunkHeader*>(reinterpret_cast<uint8_t*>(shared) + SIZEOF_SHARED_DATA);
+ }
+ static inline uint8_t *GetChunkBuffer(SharedData *shared)
+ {
+ return reinterpret_cast<uint8_t*>(shared->chunkHead) + SIZEOF_CHUNK_HEADER;
+ }
+
+ static const size_t kDefaultChunkCapacity = RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY; //!< Default chunk capacity.
+
public:
static const bool kNeedFree = false; //!< Tell users that no need to call Free() with this allocator. (concept Allocator)
+ static const bool kRefCounted = true; //!< Tell users that this allocator is reference counted on copy
//! Constructor with chunkSize.
/*! \param chunkSize The size of memory chunk. The default is kDefaultChunkSize.
\param baseAllocator The allocator for allocating memory chunks.
*/
+ explicit
MemoryPoolAllocator(size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
- chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(0), baseAllocator_(baseAllocator), ownBaseAllocator_(0)
+ chunk_capacity_(chunkSize),
+ baseAllocator_(baseAllocator ? baseAllocator : RAPIDJSON_NEW(BaseAllocator)()),
+ shared_(static_cast<SharedData*>(baseAllocator_ ? baseAllocator_->Malloc(SIZEOF_SHARED_DATA + SIZEOF_CHUNK_HEADER) : 0))
{
+ RAPIDJSON_ASSERT(baseAllocator_ != 0);
+ RAPIDJSON_ASSERT(shared_ != 0);
+ if (baseAllocator) {
+ shared_->ownBaseAllocator = 0;
+ }
+ else {
+ shared_->ownBaseAllocator = baseAllocator_;
+ }
+ shared_->chunkHead = GetChunkHead(shared_);
+ shared_->chunkHead->capacity = 0;
+ shared_->chunkHead->size = 0;
+ shared_->chunkHead->next = 0;
+ shared_->ownBuffer = true;
+ shared_->refcount = 1;
}
//! Constructor with user-supplied buffer.
@@ -136,41 +199,101 @@ public:
\param baseAllocator The allocator for allocating memory chunks.
*/
MemoryPoolAllocator(void *buffer, size_t size, size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
- chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(buffer), baseAllocator_(baseAllocator), ownBaseAllocator_(0)
+ chunk_capacity_(chunkSize),
+ baseAllocator_(baseAllocator),
+ shared_(static_cast<SharedData*>(AlignBuffer(buffer, size)))
+ {
+ RAPIDJSON_ASSERT(size >= SIZEOF_SHARED_DATA + SIZEOF_CHUNK_HEADER);
+ shared_->chunkHead = GetChunkHead(shared_);
+ shared_->chunkHead->capacity = size - SIZEOF_SHARED_DATA - SIZEOF_CHUNK_HEADER;
+ shared_->chunkHead->size = 0;
+ shared_->chunkHead->next = 0;
+ shared_->ownBaseAllocator = 0;
+ shared_->ownBuffer = false;
+ shared_->refcount = 1;
+ }
+
+ MemoryPoolAllocator(const MemoryPoolAllocator& rhs) RAPIDJSON_NOEXCEPT :
+ chunk_capacity_(rhs.chunk_capacity_),
+ baseAllocator_(rhs.baseAllocator_),
+ shared_(rhs.shared_)
+ {
+ RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
+ ++shared_->refcount;
+ }
+ MemoryPoolAllocator& operator=(const MemoryPoolAllocator& rhs) RAPIDJSON_NOEXCEPT
{
- RAPIDJSON_ASSERT(buffer != 0);
- RAPIDJSON_ASSERT(size > sizeof(ChunkHeader));
- chunkHead_ = reinterpret_cast<ChunkHeader*>(buffer);
- chunkHead_->capacity = size - sizeof(ChunkHeader);
- chunkHead_->size = 0;
- chunkHead_->next = 0;
+ RAPIDJSON_NOEXCEPT_ASSERT(rhs.shared_->refcount > 0);
+ ++rhs.shared_->refcount;
+ this->~MemoryPoolAllocator();
+ baseAllocator_ = rhs.baseAllocator_;
+ chunk_capacity_ = rhs.chunk_capacity_;
+ shared_ = rhs.shared_;
+ return *this;
}
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+ MemoryPoolAllocator(MemoryPoolAllocator&& rhs) RAPIDJSON_NOEXCEPT :
+ chunk_capacity_(rhs.chunk_capacity_),
+ baseAllocator_(rhs.baseAllocator_),
+ shared_(rhs.shared_)
+ {
+ RAPIDJSON_NOEXCEPT_ASSERT(rhs.shared_->refcount > 0);
+ rhs.shared_ = 0;
+ }
+ MemoryPoolAllocator& operator=(MemoryPoolAllocator&& rhs) RAPIDJSON_NOEXCEPT
+ {
+ RAPIDJSON_NOEXCEPT_ASSERT(rhs.shared_->refcount > 0);
+ this->~MemoryPoolAllocator();
+ baseAllocator_ = rhs.baseAllocator_;
+ chunk_capacity_ = rhs.chunk_capacity_;
+ shared_ = rhs.shared_;
+ rhs.shared_ = 0;
+ return *this;
+ }
+#endif
+
//! Destructor.
/*! This deallocates all memory chunks, excluding the user-supplied buffer.
*/
- ~MemoryPoolAllocator() {
+ ~MemoryPoolAllocator() RAPIDJSON_NOEXCEPT {
+ if (!shared_) {
+ // do nothing if moved
+ return;
+ }
+ if (shared_->refcount > 1) {
+ --shared_->refcount;
+ return;
+ }
Clear();
- RAPIDJSON_DELETE(ownBaseAllocator_);
+ BaseAllocator *a = shared_->ownBaseAllocator;
+ if (shared_->ownBuffer) {
+ baseAllocator_->Free(shared_);
+ }
+ RAPIDJSON_DELETE(a);
}
- //! Deallocates all memory chunks, excluding the user-supplied buffer.
- void Clear() {
- while (chunkHead_ && chunkHead_ != userBuffer_) {
- ChunkHeader* next = chunkHead_->next;
- baseAllocator_->Free(chunkHead_);
- chunkHead_ = next;
+ //! Deallocates all memory chunks, excluding the first/user one.
+ void Clear() RAPIDJSON_NOEXCEPT {
+ RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
+ for (;;) {
+ ChunkHeader* c = shared_->chunkHead;
+ if (!c->next) {
+ break;
+ }
+ shared_->chunkHead = c->next;
+ baseAllocator_->Free(c);
}
- if (chunkHead_ && chunkHead_ == userBuffer_)
- chunkHead_->size = 0; // Clear user buffer
+ shared_->chunkHead->size = 0;
}
//! Computes the total capacity of allocated memory chunks.
/*! \return total capacity in bytes.
*/
- size_t Capacity() const {
+ size_t Capacity() const RAPIDJSON_NOEXCEPT {
+ RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
size_t capacity = 0;
- for (ChunkHeader* c = chunkHead_; c != 0; c = c->next)
+ for (ChunkHeader* c = shared_->chunkHead; c != 0; c = c->next)
capacity += c->capacity;
return capacity;
}
@@ -178,25 +301,35 @@ public:
//! Computes the memory blocks allocated.
/*! \return total used bytes.
*/
- size_t Size() const {
+ size_t Size() const RAPIDJSON_NOEXCEPT {
+ RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
size_t size = 0;
- for (ChunkHeader* c = chunkHead_; c != 0; c = c->next)
+ for (ChunkHeader* c = shared_->chunkHead; c != 0; c = c->next)
size += c->size;
return size;
}
+ //! Whether the allocator is shared.
+ /*! \return true or false.
+ */
+ bool Shared() const RAPIDJSON_NOEXCEPT {
+ RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
+ return shared_->refcount > 1;
+ }
+
//! Allocates a memory block. (concept Allocator)
void* Malloc(size_t size) {
+ RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
if (!size)
return NULL;
size = RAPIDJSON_ALIGN(size);
- if (chunkHead_ == 0 || chunkHead_->size + size > chunkHead_->capacity)
+ if (RAPIDJSON_UNLIKELY(shared_->chunkHead->size + size > shared_->chunkHead->capacity))
if (!AddChunk(chunk_capacity_ > size ? chunk_capacity_ : size))
return NULL;
- void *buffer = reinterpret_cast<char *>(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size;
- chunkHead_->size += size;
+ void *buffer = GetChunkBuffer(shared_) + shared_->chunkHead->size;
+ shared_->chunkHead->size += size;
return buffer;
}
@@ -205,6 +338,7 @@ public:
if (originalPtr == 0)
return Malloc(newSize);
+ RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
if (newSize == 0)
return NULL;
@@ -216,10 +350,10 @@ public:
return originalPtr;
// Simply expand it if it is the last allocation and there is sufficient space
- if (originalPtr == reinterpret_cast<char *>(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size - originalSize) {
+ if (originalPtr == GetChunkBuffer(shared_) + shared_->chunkHead->size - originalSize) {
size_t increment = static_cast<size_t>(newSize - originalSize);
- if (chunkHead_->size + increment <= chunkHead_->capacity) {
- chunkHead_->size += increment;
+ if (shared_->chunkHead->size + increment <= shared_->chunkHead->capacity) {
+ shared_->chunkHead->size += increment;
return originalPtr;
}
}
@@ -235,50 +369,325 @@ public:
}
//! Frees a memory block (concept Allocator)
- static void Free(void *ptr) { (void)ptr; } // Do nothing
+ static void Free(void *ptr) RAPIDJSON_NOEXCEPT { (void)ptr; } // Do nothing
-private:
- //! Copy constructor is not permitted.
- MemoryPoolAllocator(const MemoryPoolAllocator& rhs) /* = delete */;
- //! Copy assignment operator is not permitted.
- MemoryPoolAllocator& operator=(const MemoryPoolAllocator& rhs) /* = delete */;
+ //! Compare (equality) with another MemoryPoolAllocator
+ bool operator==(const MemoryPoolAllocator& rhs) const RAPIDJSON_NOEXCEPT {
+ RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
+ RAPIDJSON_NOEXCEPT_ASSERT(rhs.shared_->refcount > 0);
+ return shared_ == rhs.shared_;
+ }
+ //! Compare (inequality) with another MemoryPoolAllocator
+ bool operator!=(const MemoryPoolAllocator& rhs) const RAPIDJSON_NOEXCEPT {
+ return !operator==(rhs);
+ }
+private:
//! Creates a new chunk.
/*! \param capacity Capacity of the chunk in bytes.
\return true if success.
*/
bool AddChunk(size_t capacity) {
if (!baseAllocator_)
- ownBaseAllocator_ = baseAllocator_ = RAPIDJSON_NEW(BaseAllocator)();
- if (ChunkHeader* chunk = reinterpret_cast<ChunkHeader*>(baseAllocator_->Malloc(RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + capacity))) {
+ shared_->ownBaseAllocator = baseAllocator_ = RAPIDJSON_NEW(BaseAllocator)();
+ if (ChunkHeader* chunk = static_cast<ChunkHeader*>(baseAllocator_->Malloc(SIZEOF_CHUNK_HEADER + capacity))) {
chunk->capacity = capacity;
chunk->size = 0;
- chunk->next = chunkHead_;
- chunkHead_ = chunk;
+ chunk->next = shared_->chunkHead;
+ shared_->chunkHead = chunk;
return true;
}
else
return false;
}
- static const int kDefaultChunkCapacity = RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY; //!< Default chunk capacity.
-
- //! Chunk header for perpending to each chunk.
- /*! Chunks are stored as a singly linked list.
- */
- struct ChunkHeader {
- size_t capacity; //!< Capacity of the chunk in bytes (excluding the header itself).
- size_t size; //!< Current size of allocated memory in bytes.
- ChunkHeader *next; //!< Next chunk in the linked list.
- };
+ static inline void* AlignBuffer(void* buf, size_t &size)
+ {
+ RAPIDJSON_NOEXCEPT_ASSERT(buf != 0);
+ const uintptr_t mask = sizeof(void*) - 1;
+ const uintptr_t ubuf = reinterpret_cast<uintptr_t>(buf);
+ if (RAPIDJSON_UNLIKELY(ubuf & mask)) {
+ const uintptr_t abuf = (ubuf + mask) & ~mask;
+ RAPIDJSON_ASSERT(size >= abuf - ubuf);
+ buf = reinterpret_cast<void*>(abuf);
+ size -= abuf - ubuf;
+ }
+ return buf;
+ }
- ChunkHeader *chunkHead_; //!< Head of the chunk linked-list. Only the head chunk serves allocation.
size_t chunk_capacity_; //!< The minimum capacity of chunk when they are allocated.
- void *userBuffer_; //!< User supplied buffer.
BaseAllocator* baseAllocator_; //!< base allocator for allocating memory chunks.
- BaseAllocator* ownBaseAllocator_; //!< base allocator created by this object.
+ SharedData *shared_; //!< The shared data of the allocator
};
+namespace internal {
+ template<typename, typename = void>
+ struct IsRefCounted :
+ public FalseType
+ { };
+ template<typename T>
+ struct IsRefCounted<T, typename internal::EnableIfCond<T::kRefCounted>::Type> :
+ public TrueType
+ { };
+}
+
+template<typename T, typename A>
+inline T* Realloc(A& a, T* old_p, size_t old_n, size_t new_n)
+{
+ RAPIDJSON_NOEXCEPT_ASSERT(old_n <= (std::numeric_limits<size_t>::max)() / sizeof(T) && new_n <= (std::numeric_limits<size_t>::max)() / sizeof(T));
+ return static_cast<T*>(a.Realloc(old_p, old_n * sizeof(T), new_n * sizeof(T)));
+}
+
+template<typename T, typename A>
+inline T *Malloc(A& a, size_t n = 1)
+{
+ return Realloc<T, A>(a, NULL, 0, n);
+}
+
+template<typename T, typename A>
+inline void Free(A& a, T *p, size_t n = 1)
+{
+ static_cast<void>(Realloc<T, A>(a, p, n, 0));
+}
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(effc++) // std::allocator can safely be inherited
+#endif
+
+template <typename T, typename BaseAllocator = CrtAllocator>
+class StdAllocator :
+ public std::allocator<T>
+{
+ typedef std::allocator<T> allocator_type;
+#if RAPIDJSON_HAS_CXX11
+ typedef std::allocator_traits<allocator_type> traits_type;
+#else
+ typedef allocator_type traits_type;
+#endif
+
+public:
+ typedef BaseAllocator BaseAllocatorType;
+
+ StdAllocator() RAPIDJSON_NOEXCEPT :
+ allocator_type(),
+ baseAllocator_()
+ { }
+
+ StdAllocator(const StdAllocator& rhs) RAPIDJSON_NOEXCEPT :
+ allocator_type(rhs),
+ baseAllocator_(rhs.baseAllocator_)
+ { }
+
+ template<typename U>
+ StdAllocator(const StdAllocator<U, BaseAllocator>& rhs) RAPIDJSON_NOEXCEPT :
+ allocator_type(rhs),
+ baseAllocator_(rhs.baseAllocator_)
+ { }
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+ StdAllocator(StdAllocator&& rhs) RAPIDJSON_NOEXCEPT :
+ allocator_type(std::move(rhs)),
+ baseAllocator_(std::move(rhs.baseAllocator_))
+ { }
+#endif
+#if RAPIDJSON_HAS_CXX11
+ using propagate_on_container_move_assignment = std::true_type;
+ using propagate_on_container_swap = std::true_type;
+#endif
+
+ /* implicit */
+ StdAllocator(const BaseAllocator& baseAllocator) RAPIDJSON_NOEXCEPT :
+ allocator_type(),
+ baseAllocator_(baseAllocator)
+ { }
+
+ ~StdAllocator() RAPIDJSON_NOEXCEPT
+ { }
+
+ template<typename U>
+ struct rebind {
+ typedef StdAllocator<U, BaseAllocator> other;
+ };
+
+ typedef typename traits_type::size_type size_type;
+ typedef typename traits_type::difference_type difference_type;
+
+ typedef typename traits_type::value_type value_type;
+ typedef typename traits_type::pointer pointer;
+ typedef typename traits_type::const_pointer const_pointer;
+
+#if RAPIDJSON_HAS_CXX11
+
+ typedef typename std::add_lvalue_reference<value_type>::type &reference;
+ typedef typename std::add_lvalue_reference<typename std::add_const<value_type>::type>::type &const_reference;
+
+ pointer address(reference r) const RAPIDJSON_NOEXCEPT
+ {
+ return std::addressof(r);
+ }
+ const_pointer address(const_reference r) const RAPIDJSON_NOEXCEPT
+ {
+ return std::addressof(r);
+ }
+
+ size_type max_size() const RAPIDJSON_NOEXCEPT
+ {
+ return traits_type::max_size(*this);
+ }
+
+ template <typename ...Args>
+ void construct(pointer p, Args&&... args)
+ {
+ traits_type::construct(*this, p, std::forward<Args>(args)...);
+ }
+ void destroy(pointer p)
+ {
+ traits_type::destroy(*this, p);
+ }
+
+#else // !RAPIDJSON_HAS_CXX11
+
+ typedef typename allocator_type::reference reference;
+ typedef typename allocator_type::const_reference const_reference;
+
+ pointer address(reference r) const RAPIDJSON_NOEXCEPT
+ {
+ return allocator_type::address(r);
+ }
+ const_pointer address(const_reference r) const RAPIDJSON_NOEXCEPT
+ {
+ return allocator_type::address(r);
+ }
+
+ size_type max_size() const RAPIDJSON_NOEXCEPT
+ {
+ return allocator_type::max_size();
+ }
+
+ void construct(pointer p, const_reference r)
+ {
+ allocator_type::construct(p, r);
+ }
+ void destroy(pointer p)
+ {
+ allocator_type::destroy(p);
+ }
+
+#endif // !RAPIDJSON_HAS_CXX11
+
+ template <typename U>
+ U* allocate(size_type n = 1, const void* = 0)
+ {
+ return RAPIDJSON_NAMESPACE::Malloc<U>(baseAllocator_, n);
+ }
+ template <typename U>
+ void deallocate(U* p, size_type n = 1)
+ {
+ RAPIDJSON_NAMESPACE::Free<U>(baseAllocator_, p, n);
+ }
+
+ pointer allocate(size_type n = 1, const void* = 0)
+ {
+ return allocate<value_type>(n);
+ }
+ void deallocate(pointer p, size_type n = 1)
+ {
+ deallocate<value_type>(p, n);
+ }
+
+#if RAPIDJSON_HAS_CXX11
+ using is_always_equal = std::is_empty<BaseAllocator>;
+#endif
+
+ template<typename U>
+ bool operator==(const StdAllocator<U, BaseAllocator>& rhs) const RAPIDJSON_NOEXCEPT
+ {
+ return baseAllocator_ == rhs.baseAllocator_;
+ }
+ template<typename U>
+ bool operator!=(const StdAllocator<U, BaseAllocator>& rhs) const RAPIDJSON_NOEXCEPT
+ {
+ return !operator==(rhs);
+ }
+
+ //! rapidjson Allocator concept
+ static const bool kNeedFree = BaseAllocator::kNeedFree;
+ static const bool kRefCounted = internal::IsRefCounted<BaseAllocator>::Value;
+ void* Malloc(size_t size)
+ {
+ return baseAllocator_.Malloc(size);
+ }
+ void* Realloc(void* originalPtr, size_t originalSize, size_t newSize)
+ {
+ return baseAllocator_.Realloc(originalPtr, originalSize, newSize);
+ }
+ static void Free(void *ptr) RAPIDJSON_NOEXCEPT
+ {
+ BaseAllocator::Free(ptr);
+ }
+
+private:
+ template <typename, typename>
+ friend class StdAllocator; // access to StdAllocator<!T>.*
+
+ BaseAllocator baseAllocator_;
+};
+
+#if !RAPIDJSON_HAS_CXX17 // std::allocator<void> deprecated in C++17
+template <typename BaseAllocator>
+class StdAllocator<void, BaseAllocator> :
+ public std::allocator<void>
+{
+ typedef std::allocator<void> allocator_type;
+
+public:
+ typedef BaseAllocator BaseAllocatorType;
+
+ StdAllocator() RAPIDJSON_NOEXCEPT :
+ allocator_type(),
+ baseAllocator_()
+ { }
+
+ StdAllocator(const StdAllocator& rhs) RAPIDJSON_NOEXCEPT :
+ allocator_type(rhs),
+ baseAllocator_(rhs.baseAllocator_)
+ { }
+
+ template<typename U>
+ StdAllocator(const StdAllocator<U, BaseAllocator>& rhs) RAPIDJSON_NOEXCEPT :
+ allocator_type(rhs),
+ baseAllocator_(rhs.baseAllocator_)
+ { }
+
+ /* implicit */
+ StdAllocator(const BaseAllocator& baseAllocator) RAPIDJSON_NOEXCEPT :
+ allocator_type(),
+ baseAllocator_(baseAllocator)
+ { }
+
+ ~StdAllocator() RAPIDJSON_NOEXCEPT
+ { }
+
+ template<typename U>
+ struct rebind {
+ typedef StdAllocator<U, BaseAllocator> other;
+ };
+
+ typedef typename allocator_type::value_type value_type;
+
+private:
+ template <typename, typename>
+ friend class StdAllocator; // access to StdAllocator<!T>.*
+
+ BaseAllocator baseAllocator_;
+};
+#endif
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_POP
+#endif
+
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_ENCODINGS_H_
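Two user-visible consequences of this allocators.h rewrite are worth spelling out: copying a MemoryPoolAllocator is now legal and shares one refcounted pool (the old code deleted the copy operations), and the new StdAllocator adapter lets STL containers draw from a rapidjson allocator. A small sketch of both, assuming the vendored header matches the upstream commit referenced above (`allocator_demo` is illustrative only):

```cpp
// Sketch of the new copy/sharing semantics and the StdAllocator adapter.
#include <vector>
#include <external/rapidjson/allocators.h>

using rapidjson::CrtAllocator;
using rapidjson::MemoryPoolAllocator;
using rapidjson::StdAllocator;

void allocator_demo()
{
    MemoryPoolAllocator<> a;       // allocates the shared, refcounted state
    MemoryPoolAllocator<> b = a;   // bumps the refcount; no deep copy
    void* p = b.Malloc(64);        // served from the pool 'a' and 'b' share
    (void)p;                       // pool memory is freed when the last copy dies

    bool same = (a == b);          // equality compares identity of the shared state
    bool shared = a.Shared();      // true while more than one handle exists
    (void)same; (void)shared;

    // StdAllocator models the std::allocator interface on top of a rapidjson
    // allocator, so standard containers can use it directly:
    std::vector<int, StdAllocator<int, CrtAllocator> > v;
    v.push_back(42);
}
```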
diff --git a/src/native/external/rapidjson/cursorstreamwrapper.h b/src/native/external/rapidjson/cursorstreamwrapper.h
deleted file mode 100644
index 52c11a7c01d7..000000000000
--- a/src/native/external/rapidjson/cursorstreamwrapper.h
+++ /dev/null
@@ -1,78 +0,0 @@
-// Tencent is pleased to support the open source community by making RapidJSON available.
-//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
-//
-// Licensed under the MIT License (the "License"); you may not use this file except
-// in compliance with the License. You may obtain a copy of the License at
-//
-// http://opensource.org/licenses/MIT
-//
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-#ifndef RAPIDJSON_CURSORSTREAMWRAPPER_H_
-#define RAPIDJSON_CURSORSTREAMWRAPPER_H_
-
-#include "stream.h"
-
-#if defined(__GNUC__)
-RAPIDJSON_DIAG_PUSH
-RAPIDJSON_DIAG_OFF(effc++)
-#endif
-
-#if defined(_MSC_VER) && _MSC_VER <= 1800
-RAPIDJSON_DIAG_PUSH
-RAPIDJSON_DIAG_OFF(4702) // unreachable code
-RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
-#endif
-
-RAPIDJSON_NAMESPACE_BEGIN
-
-
-//! Cursor stream wrapper for counting line and column number if error exists.
-/*!
- \tparam InputStream Any stream that implements Stream Concept
-*/
-template <typename InputStream, typename Encoding = UTF8<> >
-class CursorStreamWrapper : public GenericStreamWrapper<InputStream, Encoding> {
-public:
- typedef typename Encoding::Ch Ch;
-
- CursorStreamWrapper(InputStream& is):
- GenericStreamWrapper<InputStream, Encoding>(is), line_(1), col_(0) {}
-
- // counting line and column number
- Ch Take() {
- Ch ch = this->is_.Take();
- if(ch == '\n') {
- line_ ++;
- col_ = 0;
- } else {
- col_ ++;
- }
- return ch;
- }
-
- //! Get the error line number, if error exists.
- size_t GetLine() const { return line_; }
- //! Get the error column number, if error exists.
- size_t GetColumn() const { return col_; }
-
-private:
- size_t line_; //!< Current Line
- size_t col_; //!< Current Column
-};
-
-#if defined(_MSC_VER) && _MSC_VER <= 1800
-RAPIDJSON_DIAG_POP
-#endif
-
-#if defined(__GNUC__)
-RAPIDJSON_DIAG_POP
-#endif
-
-RAPIDJSON_NAMESPACE_END
-
-#endif // RAPIDJSON_CURSORSTREAMWRAPPER_H_
diff --git a/src/native/external/rapidjson/document.h b/src/native/external/rapidjson/document.h
index 74666e3423ee..2cd9a70a6003 100644
--- a/src/native/external/rapidjson/document.h
+++ b/src/native/external/rapidjson/document.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
@@ -24,6 +24,9 @@
#include "encodedstream.h"
#include <new> // placement new
#include <limits>
+#ifdef __cpp_lib_three_way_comparison
+#include <compare>
+#endif
RAPIDJSON_DIAG_PUSH
#ifdef __clang__
@@ -39,12 +42,21 @@ RAPIDJSON_DIAG_OFF(4244) // conversion from kXxxFlags to 'uint16_t', possible lo
RAPIDJSON_DIAG_OFF(effc++)
#endif // __GNUC__
+#ifdef GetObject
+// see https://github.com/Tencent/rapidjson/issues/1448
+// a former included windows.h might have defined a macro called GetObject, which affects
+// GetObject defined here. This ensures the macro does not get applied
+#pragma push_macro("GetObject")
+#define RAPIDJSON_WINDOWS_GETOBJECT_WORKAROUND_APPLIED
+#undef GetObject
+#endif
+
#ifndef RAPIDJSON_NOMEMBERITERATORCLASS
#include <iterator> // std::random_access_iterator_tag
#endif
-#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
-#include <utility> // std::move
+#if RAPIDJSON_USE_MEMBERSMAP
+#include <map> // std::multimap
#endif
RAPIDJSON_NAMESPACE_BEGIN
@@ -56,6 +68,48 @@ class GenericValue;
template <typename Encoding, typename Allocator, typename StackAllocator>
class GenericDocument;
+/*! \def RAPIDJSON_DEFAULT_ALLOCATOR
+ \ingroup RAPIDJSON_CONFIG
+ \brief Allows choosing the default allocator.
+
+ User can define this to use CrtAllocator or MemoryPoolAllocator.
+*/
+#ifndef RAPIDJSON_DEFAULT_ALLOCATOR
+#define RAPIDJSON_DEFAULT_ALLOCATOR ::RAPIDJSON_NAMESPACE::MemoryPoolAllocator<::RAPIDJSON_NAMESPACE::CrtAllocator>
+#endif
+
+/*! \def RAPIDJSON_DEFAULT_STACK_ALLOCATOR
+ \ingroup RAPIDJSON_CONFIG
+ \brief Allows choosing the default stack allocator for Document.
+
+ User can define this to use CrtAllocator or MemoryPoolAllocator.
+*/
+#ifndef RAPIDJSON_DEFAULT_STACK_ALLOCATOR
+#define RAPIDJSON_DEFAULT_STACK_ALLOCATOR ::RAPIDJSON_NAMESPACE::CrtAllocator
+#endif
+
+/*! \def RAPIDJSON_VALUE_DEFAULT_OBJECT_CAPACITY
+ \ingroup RAPIDJSON_CONFIG
+ \brief User-defined kDefaultObjectCapacity value.
+
+ User can define this as any natural number.
+*/
+#ifndef RAPIDJSON_VALUE_DEFAULT_OBJECT_CAPACITY
+// number of objects that rapidjson::Value allocates memory for by default
+#define RAPIDJSON_VALUE_DEFAULT_OBJECT_CAPACITY 16
+#endif
+
+/*! \def RAPIDJSON_VALUE_DEFAULT_ARRAY_CAPACITY
+ \ingroup RAPIDJSON_CONFIG
+ \brief User-defined kDefaultArrayCapacity value.
+
+ User can define this as any natural number.
+*/
+#ifndef RAPIDJSON_VALUE_DEFAULT_ARRAY_CAPACITY
+// number of array elements that rapidjson::Value allocates memory for by default
+#define RAPIDJSON_VALUE_DEFAULT_ARRAY_CAPACITY 16
+#endif
+
//! Name-value pair in a JSON object value.
/*!
This class was internal to GenericValue. It used to be a inner struct.
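All of the configuration macros introduced above are `#ifndef`-guarded, so they are compile-time knobs: define them before including document.h and every GenericValue/GenericDocument in the translation unit picks them up. A minimal sketch (the override values are chosen purely for illustration):

```cpp
// Sketch: overriding the new configuration knobs. The definitions must come
// before the rapidjson include, since the header only defines them #ifndef.
#define RAPIDJSON_DEFAULT_ALLOCATOR ::rapidjson::CrtAllocator
#define RAPIDJSON_VALUE_DEFAULT_OBJECT_CAPACITY 4
#define RAPIDJSON_VALUE_DEFAULT_ARRAY_CAPACITY 4

#include <external/rapidjson/document.h>

// rapidjson::Document now defaults to CrtAllocator (one malloc/free per
// allocation, no pooled chunks) and reserves room for only 4 members or
// elements on the first insertion.
rapidjson::Document make_doc() { return rapidjson::Document(); }
```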
@@ -63,15 +117,45 @@ class GenericDocument;
https://code.google.com/p/rapidjson/issues/detail?id=64
*/
template <typename Encoding, typename Allocator>
-struct GenericMember {
+class GenericMember {
+public:
GenericValue<Encoding, Allocator> name; //!< name of member (must be a string)
GenericValue<Encoding, Allocator> value; //!< value of member.
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+ //! Move constructor in C++11
+ GenericMember(GenericMember&& rhs) RAPIDJSON_NOEXCEPT
+ : name(std::move(rhs.name)),
+ value(std::move(rhs.value))
+ {
+ }
+
+ //! Move assignment in C++11
+ GenericMember& operator=(GenericMember&& rhs) RAPIDJSON_NOEXCEPT {
+ return *this = static_cast<GenericMember&>(rhs);
+ }
+#endif
+
+ //! Assignment with move semantics.
+ /*! \param rhs Source of the assignment. Its name and value will become a null value after assignment.
+ */
+ GenericMember& operator=(GenericMember& rhs) RAPIDJSON_NOEXCEPT {
+ if (RAPIDJSON_LIKELY(this != &rhs)) {
+ name = rhs.name;
+ value = rhs.value;
+ }
+ return *this;
+ }
+
// swap() for std::sort() and other potential use in STL.
friend inline void swap(GenericMember& a, GenericMember& b) RAPIDJSON_NOEXCEPT {
a.name.Swap(b.name);
a.value.Swap(b.value);
}
+
+private:
+ //! Copy constructor is not permitted.
+ GenericMember(const GenericMember& rhs);
};
///////////////////////////////////////////////////////////////////////////////
@@ -175,12 +259,16 @@ public:
//! @name relations
//@{
- bool operator==(ConstIterator that) const { return ptr_ == that.ptr_; }
- bool operator!=(ConstIterator that) const { return ptr_ != that.ptr_; }
- bool operator<=(ConstIterator that) const { return ptr_ <= that.ptr_; }
- bool operator>=(ConstIterator that) const { return ptr_ >= that.ptr_; }
- bool operator< (ConstIterator that) const { return ptr_ < that.ptr_; }
- bool operator> (ConstIterator that) const { return ptr_ > that.ptr_; }
+ template <bool Const_> bool operator==(const GenericMemberIterator<Const_, Encoding, Allocator>& that) const { return ptr_ == that.ptr_; }
+ template <bool Const_> bool operator!=(const GenericMemberIterator<Const_, Encoding, Allocator>& that) const { return ptr_ != that.ptr_; }
+ template <bool Const_> bool operator<=(const GenericMemberIterator<Const_, Encoding, Allocator>& that) const { return ptr_ <= that.ptr_; }
+ template <bool Const_> bool operator>=(const GenericMemberIterator<Const_, Encoding, Allocator>& that) const { return ptr_ >= that.ptr_; }
+ template <bool Const_> bool operator< (const GenericMemberIterator<Const_, Encoding, Allocator>& that) const { return ptr_ < that.ptr_; }
+ template <bool Const_> bool operator> (const GenericMemberIterator<Const_, Encoding, Allocator>& that) const { return ptr_ > that.ptr_; }
+
+#ifdef __cpp_lib_three_way_comparison
+ template <bool Const_> std::strong_ordering operator<=>(const GenericMemberIterator<Const_, Encoding, Allocator>& that) const { return ptr_ <=> that.ptr_; }
+#endif
//@}
//! @name dereference
@@ -210,12 +298,14 @@ class GenericMemberIterator;
//! non-const GenericMemberIterator
template <typename Encoding, typename Allocator>
class GenericMemberIterator<false,Encoding,Allocator> {
+public:
//! use plain pointer as iterator type
typedef GenericMember<Encoding,Allocator>* Iterator;
};
//! const GenericMemberIterator
template <typename Encoding, typename Allocator>
class GenericMemberIterator<true,Encoding,Allocator> {
+public:
//! use plain const pointer as iterator type
typedef const GenericMember<Encoding,Allocator>* Iterator;
};
@@ -574,7 +664,7 @@ template <bool, typename> class GenericObject;
\tparam Encoding Encoding of the value. (Even non-string values need to have the same encoding in a document)
\tparam Allocator Allocator type for allocating memory of object, array and string.
*/
-template <typename Encoding, typename Allocator = MemoryPoolAllocator<> >
+template <typename Encoding, typename Allocator = RAPIDJSON_DEFAULT_ALLOCATOR >
class GenericValue {
public:
//! Name-value pair in an object.
@@ -651,18 +741,8 @@ public:
template <typename SourceAllocator>
GenericValue(const GenericValue<Encoding,SourceAllocator>& rhs, Allocator& allocator, bool copyConstStrings = false) {
switch (rhs.GetType()) {
- case kObjectType: {
- SizeType count = rhs.data_.o.size;
- Member* lm = reinterpret_cast<Member*>(allocator.Malloc(count * sizeof(Member)));
- const typename GenericValue<Encoding,SourceAllocator>::Member* rm = rhs.GetMembersPointer();
- for (SizeType i = 0; i < count; i++) {
- new (&lm[i].name) GenericValue(rm[i].name, allocator, copyConstStrings);
- new (&lm[i].value) GenericValue(rm[i].value, allocator, copyConstStrings);
- }
- data_.f.flags = kObjectFlag;
- data_.o.size = data_.o.capacity = count;
- SetMembersPointer(lm);
- }
+ case kObjectType:
+ DoCopyMembers(rhs, allocator, copyConstStrings);
break;
case kArrayType: {
SizeType count = rhs.data_.a.size;
@@ -798,25 +878,30 @@ public:
/*! Need to destruct elements of array, members of object, or copy-string.
*/
~GenericValue() {
- if (Allocator::kNeedFree) { // Shortcut by Allocator's trait
+ // With RAPIDJSON_USE_MEMBERSMAP, the maps need to be destroyed to release
+ // their Allocator if it's refcounted (e.g. MemoryPoolAllocator).
+ if (Allocator::kNeedFree || (RAPIDJSON_USE_MEMBERSMAP+0 &&
+ internal::IsRefCounted<Allocator>::Value)) {
switch(data_.f.flags) {
case kArrayFlag:
{
GenericValue* e = GetElementsPointer();
for (GenericValue* v = e; v != e + data_.a.size; ++v)
v->~GenericValue();
- Allocator::Free(e);
+ if (Allocator::kNeedFree) { // Shortcut by Allocator's trait
+ Allocator::Free(e);
+ }
}
break;
case kObjectFlag:
- for (MemberIterator m = MemberBegin(); m != MemberEnd(); ++m)
- m->~Member();
- Allocator::Free(GetMembersPointer());
+ DoFreeMembers();
break;
case kCopyStringFlag:
- Allocator::Free(const_cast<Ch*>(GetStringPointer()));
+ if (Allocator::kNeedFree) { // Shortcut by Allocator's trait
+ Allocator::Free(const_cast<Ch*>(GetStringPointer()));
+ }
break;
default:
@@ -835,8 +920,13 @@ public:
*/
GenericValue& operator=(GenericValue& rhs) RAPIDJSON_NOEXCEPT {
if (RAPIDJSON_LIKELY(this != &rhs)) {
+ // Can't destroy "this" before assigning "rhs", otherwise "rhs"
+ // could be used after free if it's a sub-Value of "this",
+ // hence the temporary dance.
+ GenericValue temp;
+ temp.RawAssign(rhs);
this->~GenericValue();
- RawAssign(rhs);
+ RawAssign(temp);
}
return *this;
}
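The detour through `temp` fixes an aliasing bug: if `rhs` lives inside `*this`, destroying `*this` first would free `rhs` before it is read. A sketch of the case the temporary guards against (hypothetical usage, not from the patch; `aliasing_demo` is illustrative):

```cpp
// Sketch of the aliasing case: move-assigning a value's own member over the
// value that contains it.
#include <external/rapidjson/document.h>

void aliasing_demo()
{
    rapidjson::Document d;                 // supplies the allocator
    rapidjson::Value root(rapidjson::kObjectType);
    rapidjson::Value inner(rapidjson::kObjectType);
    inner.AddMember("x", 1, d.GetAllocator());
    root.AddMember("inner", inner, d.GetAllocator());  // 'inner' is moved in

    // The source of the assignment is a sub-value of the destination. With
    // the old code, ~GenericValue() on 'root' destroyed root["inner"] before
    // RawAssign could read it; the temporary now saves it first.
    root = root["inner"];                  // root becomes {"x":1}
}
```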
@@ -988,6 +1078,7 @@ public:
*/
template <typename T> RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>,internal::IsGenericValue<T> >), (bool)) operator==(const T& rhs) const { return *this == GenericValue(rhs); }
+#ifndef __cpp_impl_three_way_comparison
//! Not-equal-to operator
/*! \return !(*this == rhs)
*/
@@ -1012,6 +1103,7 @@ public:
*/
template <typename T> friend RAPIDJSON_DISABLEIF_RETURN((internal::IsGenericValue<T>), (bool)) operator!=(const T& lhs, const GenericValue& rhs) { return !(rhs == lhs); }
//@}
+#endif
//!@name Type
//@{
@@ -1138,13 +1230,28 @@ public:
else {
RAPIDJSON_ASSERT(false); // see above note
- // This will generate -Wexit-time-destructors in clang
- // static GenericValue NullValue;
- // return NullValue;
-
- // Use static buffer and placement-new to prevent destruction
- static char buffer[sizeof(GenericValue)];
+#if RAPIDJSON_HAS_CXX11
+ // Use thread-local storage to prevent races between threads.
+ // Use static buffer and placement-new to prevent destruction, with
+ // alignas() to ensure proper alignment.
+ alignas(GenericValue) thread_local static char buffer[sizeof(GenericValue)];
+ return *new (buffer) GenericValue();
+#elif defined(_MSC_VER) && _MSC_VER < 1900
+ // There's no way to solve both thread locality and proper alignment
+ // simultaneously.
+ __declspec(thread) static char buffer[sizeof(GenericValue)];
return *new (buffer) GenericValue();
+#elif defined(__GNUC__) || defined(__clang__)
+ // This will generate -Wexit-time-destructors in clang, but that's
+ // better than having under-alignment.
+ __thread static GenericValue buffer;
+ return buffer;
+#else
+ // Don't know what compiler this is, so don't know how to ensure
+ // thread-locality.
+ static GenericValue buffer;
+ return buffer;
+#endif
}
}
template <typename SourceAllocator>
@@ -1177,10 +1284,7 @@ public:
*/
GenericValue& MemberReserve(SizeType newCapacity, Allocator &allocator) {
RAPIDJSON_ASSERT(IsObject());
- if (newCapacity > data_.o.capacity) {
- SetMembersPointer(reinterpret_cast<Member*>(allocator.Realloc(GetMembersPointer(), data_.o.capacity * sizeof(Member), newCapacity * sizeof(Member))));
- data_.o.capacity = newCapacity;
- }
+ DoReserveMembers(newCapacity, allocator);
return *this;
}
@@ -1254,11 +1358,7 @@ public:
MemberIterator FindMember(const GenericValue<Encoding, SourceAllocator>& name) {
RAPIDJSON_ASSERT(IsObject());
RAPIDJSON_ASSERT(name.IsString());
- MemberIterator member = MemberBegin();
- for ( ; member != MemberEnd(); ++member)
- if (name.StringEqual(member->name))
- break;
- return member;
+ return DoFindMember(name);
}
template <typename SourceAllocator> ConstMemberIterator FindMember(const GenericValue<Encoding, SourceAllocator>& name) const { return const_cast<GenericValue&>(*this).FindMember(name); }
@@ -1287,14 +1387,7 @@ public:
GenericValue& AddMember(GenericValue& name, GenericValue& value, Allocator& allocator) {
RAPIDJSON_ASSERT(IsObject());
RAPIDJSON_ASSERT(name.IsString());
-
- ObjectData& o = data_.o;
- if (o.size >= o.capacity)
- MemberReserve(o.capacity == 0 ? kDefaultObjectCapacity : (o.capacity + (o.capacity + 1) / 2), allocator);
- Member* members = GetMembersPointer();
- members[o.size].name.RawAssign(name);
- members[o.size].value.RawAssign(value);
- o.size++;
+ DoAddMember(name, value, allocator);
return *this;
}
@@ -1428,9 +1521,7 @@ public:
*/
void RemoveAllMembers() {
RAPIDJSON_ASSERT(IsObject());
- for (MemberIterator m = MemberBegin(); m != MemberEnd(); ++m)
- m->~Member();
- data_.o.size = 0;
+ DoClearMembers();
}
//! Remove a member in object by its name.
@@ -1474,14 +1565,7 @@ public:
RAPIDJSON_ASSERT(data_.o.size > 0);
RAPIDJSON_ASSERT(GetMembersPointer() != 0);
RAPIDJSON_ASSERT(m >= MemberBegin() && m < MemberEnd());
-
- MemberIterator last(GetMembersPointer() + (data_.o.size - 1));
- if (data_.o.size > 1 && m != last)
- *m = *last; // Move the last one to this place
- else
- m->~Member(); // Only one left, just destroy
- --data_.o.size;
- return m;
+ return DoRemoveMember(m);
}
//! Remove a member from an object by iterator.
@@ -1513,13 +1597,7 @@ public:
RAPIDJSON_ASSERT(first >= MemberBegin());
RAPIDJSON_ASSERT(first <= last);
RAPIDJSON_ASSERT(last <= MemberEnd());
-
- MemberIterator pos = MemberBegin() + (first - MemberBegin());
- for (MemberIterator itr = pos; itr != last; ++itr)
- itr->~Member();
- std::memmove(static_cast<void*>(&*pos), &*last, static_cast<size_t>(MemberEnd() - last) * sizeof(Member));
- data_.o.size -= static_cast<SizeType>(last - first);
- return pos;
+ return DoEraseMembers(first, last);
}
//! Erase a member in object by its name.
@@ -1548,7 +1626,9 @@ public:
}
Object GetObject() { RAPIDJSON_ASSERT(IsObject()); return Object(*this); }
+ Object GetObj() { RAPIDJSON_ASSERT(IsObject()); return Object(*this); }
ConstObject GetObject() const { RAPIDJSON_ASSERT(IsObject()); return ConstObject(*this); }
+ ConstObject GetObj() const { RAPIDJSON_ASSERT(IsObject()); return ConstObject(*this); }
//@}
@@ -1770,12 +1850,12 @@ public:
//!@name String
//@{
- const Ch* GetString() const { RAPIDJSON_ASSERT(IsString()); return (data_.f.flags & kInlineStrFlag) ? data_.ss.str : GetStringPointer(); }
+ const Ch* GetString() const { RAPIDJSON_ASSERT(IsString()); return DataString(data_); }
//! Get the length of string.
/*! Since rapidjson permits "\\u0000" in the json string, strlen(v.GetString()) may not equal to v.GetStringLength().
*/
- SizeType GetStringLength() const { RAPIDJSON_ASSERT(IsString()); return ((data_.f.flags & kInlineStrFlag) ? (data_.ss.GetLength()) : data_.s.length); }
+ SizeType GetStringLength() const { RAPIDJSON_ASSERT(IsString()); return DataStringLength(data_); }
//! Set this value as a string without copying source string.
/*! This version has better performance with supplied length, and also support string containing null character.
@@ -1886,7 +1966,7 @@ public:
case kArrayType:
if (RAPIDJSON_UNLIKELY(!handler.StartArray()))
return false;
- for (const GenericValue* v = Begin(); v != End(); ++v)
+ for (ConstValueIterator v = Begin(); v != End(); ++v)
if (RAPIDJSON_UNLIKELY(!v->Accept(handler)))
return false;
return handler.EndArray(data_.a.size);
@@ -1922,25 +2002,26 @@ private:
// Initial flags of different types.
kNullFlag = kNullType,
- kTrueFlag = kTrueType | kBoolFlag,
- kFalseFlag = kFalseType | kBoolFlag,
- kNumberIntFlag = kNumberType | kNumberFlag | kIntFlag | kInt64Flag,
- kNumberUintFlag = kNumberType | kNumberFlag | kUintFlag | kUint64Flag | kInt64Flag,
- kNumberInt64Flag = kNumberType | kNumberFlag | kInt64Flag,
- kNumberUint64Flag = kNumberType | kNumberFlag | kUint64Flag,
- kNumberDoubleFlag = kNumberType | kNumberFlag | kDoubleFlag,
- kNumberAnyFlag = kNumberType | kNumberFlag | kIntFlag | kInt64Flag | kUintFlag | kUint64Flag | kDoubleFlag,
- kConstStringFlag = kStringType | kStringFlag,
- kCopyStringFlag = kStringType | kStringFlag | kCopyFlag,
- kShortStringFlag = kStringType | kStringFlag | kCopyFlag | kInlineStrFlag,
+ // These casts are added to suppress the warning on MSVC about bitwise operations between enums of different types.
+ kTrueFlag = static_cast<int>(kTrueType) | static_cast<int>(kBoolFlag),
+ kFalseFlag = static_cast<int>(kFalseType) | static_cast<int>(kBoolFlag),
+ kNumberIntFlag = static_cast<int>(kNumberType) | static_cast<int>(kNumberFlag | kIntFlag | kInt64Flag),
+ kNumberUintFlag = static_cast<int>(kNumberType) | static_cast<int>(kNumberFlag | kUintFlag | kUint64Flag | kInt64Flag),
+ kNumberInt64Flag = static_cast<int>(kNumberType) | static_cast<int>(kNumberFlag | kInt64Flag),
+ kNumberUint64Flag = static_cast<int>(kNumberType) | static_cast<int>(kNumberFlag | kUint64Flag),
+ kNumberDoubleFlag = static_cast<int>(kNumberType) | static_cast<int>(kNumberFlag | kDoubleFlag),
+ kNumberAnyFlag = static_cast<int>(kNumberType) | static_cast<int>(kNumberFlag | kIntFlag | kInt64Flag | kUintFlag | kUint64Flag | kDoubleFlag),
+ kConstStringFlag = static_cast<int>(kStringType) | static_cast<int>(kStringFlag),
+ kCopyStringFlag = static_cast<int>(kStringType) | static_cast<int>(kStringFlag | kCopyFlag),
+ kShortStringFlag = static_cast<int>(kStringType) | static_cast<int>(kStringFlag | kCopyFlag | kInlineStrFlag),
kObjectFlag = kObjectType,
kArrayFlag = kArrayType,
kTypeMask = 0x07
};
- static const SizeType kDefaultArrayCapacity = 16;
- static const SizeType kDefaultObjectCapacity = 16;
+ static const SizeType kDefaultArrayCapacity = RAPIDJSON_VALUE_DEFAULT_ARRAY_CAPACITY;
+ static const SizeType kDefaultObjectCapacity = RAPIDJSON_VALUE_DEFAULT_OBJECT_CAPACITY;
struct Flag {
#if RAPIDJSON_48BITPOINTER_OPTIMIZATION
@@ -2023,6 +2104,13 @@ private:
Flag f;
}; // 16 bytes in 32-bit mode, 24 bytes in 64-bit mode, 16 bytes in 64-bit with RAPIDJSON_48BITPOINTER_OPTIMIZATION
+ static RAPIDJSON_FORCEINLINE const Ch* DataString(const Data& data) {
+ return (data.f.flags & kInlineStrFlag) ? data.ss.str : RAPIDJSON_GETPOINTER(Ch, data.s.str);
+ }
+ static RAPIDJSON_FORCEINLINE SizeType DataStringLength(const Data& data) {
+ return (data.f.flags & kInlineStrFlag) ? data.ss.GetLength() : data.s.length;
+ }
+
RAPIDJSON_FORCEINLINE const Ch* GetStringPointer() const { return RAPIDJSON_GETPOINTER(Ch, data_.s.str); }
RAPIDJSON_FORCEINLINE const Ch* SetStringPointer(const Ch* str) { return RAPIDJSON_SETPOINTER(Ch, data_.s.str, str); }
RAPIDJSON_FORCEINLINE GenericValue* GetElementsPointer() const { return RAPIDJSON_GETPOINTER(GenericValue, data_.a.elements); }
@@ -2030,6 +2118,286 @@ private:
RAPIDJSON_FORCEINLINE Member* GetMembersPointer() const { return RAPIDJSON_GETPOINTER(Member, data_.o.members); }
RAPIDJSON_FORCEINLINE Member* SetMembersPointer(Member* members) { return RAPIDJSON_SETPOINTER(Member, data_.o.members, members); }
+#if RAPIDJSON_USE_MEMBERSMAP
+
+ struct MapTraits {
+ struct Less {
+ bool operator()(const Data& s1, const Data& s2) const {
+ SizeType n1 = DataStringLength(s1), n2 = DataStringLength(s2);
+ int cmp = std::memcmp(DataString(s1), DataString(s2), sizeof(Ch) * (n1 < n2 ? n1 : n2));
+ return cmp < 0 || (cmp == 0 && n1 < n2);
+ }
+ };
+ typedef std::pair<const Data, SizeType> Pair;
+ typedef std::multimap<Data, SizeType, Less, StdAllocator<Pair, Allocator> > Map;
+ typedef typename Map::iterator Iterator;
+ };
+ typedef typename MapTraits::Map Map;
+ typedef typename MapTraits::Less MapLess;
+ typedef typename MapTraits::Pair MapPair;
+ typedef typename MapTraits::Iterator MapIterator;
+
+ //
+ // Layout of the members' map/array, re(al)located according to the needed capacity:
+ //
+ // {Map*}<>{capacity}<>{Member[capacity]}<>{MapIterator[capacity]}
+ //
+ // (where <> stands for the RAPIDJSON_ALIGN-ment, if needed)
+ //
+
+ static RAPIDJSON_FORCEINLINE size_t GetMapLayoutSize(SizeType capacity) {
+ return RAPIDJSON_ALIGN(sizeof(Map*)) +
+ RAPIDJSON_ALIGN(sizeof(SizeType)) +
+ RAPIDJSON_ALIGN(capacity * sizeof(Member)) +
+ capacity * sizeof(MapIterator);
+ }
+
+ static RAPIDJSON_FORCEINLINE SizeType &GetMapCapacity(Map* &map) {
+ return *reinterpret_cast<SizeType*>(reinterpret_cast<uintptr_t>(&map) +
+ RAPIDJSON_ALIGN(sizeof(Map*)));
+ }
+
+ static RAPIDJSON_FORCEINLINE Member* GetMapMembers(Map* &map) {
+ return reinterpret_cast<Member*>(reinterpret_cast<uintptr_t>(&map) +
+ RAPIDJSON_ALIGN(sizeof(Map*)) +
+ RAPIDJSON_ALIGN(sizeof(SizeType)));
+ }
+
+ static RAPIDJSON_FORCEINLINE MapIterator* GetMapIterators(Map* &map) {
+ return reinterpret_cast<MapIterator*>(reinterpret_cast<uintptr_t>(&map) +
+ RAPIDJSON_ALIGN(sizeof(Map*)) +
+ RAPIDJSON_ALIGN(sizeof(SizeType)) +
+ RAPIDJSON_ALIGN(GetMapCapacity(map) * sizeof(Member)));
+ }
+
+ static RAPIDJSON_FORCEINLINE Map* &GetMap(Member* members) {
+ RAPIDJSON_ASSERT(members != 0);
+ return *reinterpret_cast<Map**>(reinterpret_cast<uintptr_t>(members) -
+ RAPIDJSON_ALIGN(sizeof(SizeType)) -
+ RAPIDJSON_ALIGN(sizeof(Map*)));
+ }
+
+ // Some compilers' debug mechanisms want all iterators to be destroyed, for their accounting.
+ RAPIDJSON_FORCEINLINE MapIterator DropMapIterator(MapIterator& rhs) {
+#if RAPIDJSON_HAS_CXX11
+ MapIterator ret = std::move(rhs);
+#else
+ MapIterator ret = rhs;
+#endif
+ rhs.~MapIterator();
+ return ret;
+ }
+
+ Map* &DoReallocMap(Map** oldMap, SizeType newCapacity, Allocator& allocator) {
+ Map **newMap = static_cast<Map**>(allocator.Malloc(GetMapLayoutSize(newCapacity)));
+ GetMapCapacity(*newMap) = newCapacity;
+ if (!oldMap) {
+ *newMap = new (allocator.Malloc(sizeof(Map))) Map(MapLess(), allocator);
+ }
+ else {
+ *newMap = *oldMap;
+ size_t count = (*oldMap)->size();
+ std::memcpy(static_cast<void*>(GetMapMembers(*newMap)),
+ static_cast<void*>(GetMapMembers(*oldMap)),
+ count * sizeof(Member));
+ MapIterator *oldIt = GetMapIterators(*oldMap),
+ *newIt = GetMapIterators(*newMap);
+ while (count--) {
+ new (&newIt[count]) MapIterator(DropMapIterator(oldIt[count]));
+ }
+ Allocator::Free(oldMap);
+ }
+ return *newMap;
+ }
+
+ RAPIDJSON_FORCEINLINE Member* DoAllocMembers(SizeType capacity, Allocator& allocator) {
+ return GetMapMembers(DoReallocMap(0, capacity, allocator));
+ }
+
+ void DoReserveMembers(SizeType newCapacity, Allocator& allocator) {
+ ObjectData& o = data_.o;
+ if (newCapacity > o.capacity) {
+ Member* oldMembers = GetMembersPointer();
+ Map **oldMap = oldMembers ? &GetMap(oldMembers) : 0,
+ *&newMap = DoReallocMap(oldMap, newCapacity, allocator);
+ RAPIDJSON_SETPOINTER(Member, o.members, GetMapMembers(newMap));
+ o.capacity = newCapacity;
+ }
+ }
+
+ template <typename SourceAllocator>
+ MemberIterator DoFindMember(const GenericValue<Encoding, SourceAllocator>& name) {
+ if (Member* members = GetMembersPointer()) {
+ Map* &map = GetMap(members);
+ MapIterator mit = map->find(reinterpret_cast<const Data&>(name.data_));
+ if (mit != map->end()) {
+ return MemberIterator(&members[mit->second]);
+ }
+ }
+ return MemberEnd();
+ }
+
+ void DoClearMembers() {
+ if (Member* members = GetMembersPointer()) {
+ Map* &map = GetMap(members);
+ MapIterator* mit = GetMapIterators(map);
+ for (SizeType i = 0; i < data_.o.size; i++) {
+ map->erase(DropMapIterator(mit[i]));
+ members[i].~Member();
+ }
+ data_.o.size = 0;
+ }
+ }
+
+ void DoFreeMembers() {
+ if (Member* members = GetMembersPointer()) {
+ GetMap(members)->~Map();
+ for (SizeType i = 0; i < data_.o.size; i++) {
+ members[i].~Member();
+ }
+ if (Allocator::kNeedFree) { // Shortcut by Allocator's trait
+ Map** map = &GetMap(members);
+ Allocator::Free(*map);
+ Allocator::Free(map);
+ }
+ }
+ }
+
+#else // !RAPIDJSON_USE_MEMBERSMAP
+
+ RAPIDJSON_FORCEINLINE Member* DoAllocMembers(SizeType capacity, Allocator& allocator) {
+ return Malloc<Member>(allocator, capacity);
+ }
+
+ void DoReserveMembers(SizeType newCapacity, Allocator& allocator) {
+ ObjectData& o = data_.o;
+ if (newCapacity > o.capacity) {
+ Member* newMembers = Realloc<Member>(allocator, GetMembersPointer(), o.capacity, newCapacity);
+ RAPIDJSON_SETPOINTER(Member, o.members, newMembers);
+ o.capacity = newCapacity;
+ }
+ }
+
+ template <typename SourceAllocator>
+ MemberIterator DoFindMember(const GenericValue<Encoding, SourceAllocator>& name) {
+ MemberIterator member = MemberBegin();
+ for ( ; member != MemberEnd(); ++member)
+ if (name.StringEqual(member->name))
+ break;
+ return member;
+ }
+
+ void DoClearMembers() {
+ for (MemberIterator m = MemberBegin(); m != MemberEnd(); ++m)
+ m->~Member();
+ data_.o.size = 0;
+ }
+
+ void DoFreeMembers() {
+ for (MemberIterator m = MemberBegin(); m != MemberEnd(); ++m)
+ m->~Member();
+ Allocator::Free(GetMembersPointer());
+ }
+
+#endif // !RAPIDJSON_USE_MEMBERSMAP
+
+ void DoAddMember(GenericValue& name, GenericValue& value, Allocator& allocator) {
+ ObjectData& o = data_.o;
+ if (o.size >= o.capacity)
+ DoReserveMembers(o.capacity ? (o.capacity + (o.capacity + 1) / 2) : kDefaultObjectCapacity, allocator);
+ Member* members = GetMembersPointer();
+ Member* m = members + o.size;
+ m->name.RawAssign(name);
+ m->value.RawAssign(value);
+#if RAPIDJSON_USE_MEMBERSMAP
+ Map* &map = GetMap(members);
+ MapIterator* mit = GetMapIterators(map);
+ new (&mit[o.size]) MapIterator(map->insert(MapPair(m->name.data_, o.size)));
+#endif
+ ++o.size;
+ }
+
+ MemberIterator DoRemoveMember(MemberIterator m) {
+ ObjectData& o = data_.o;
+ Member* members = GetMembersPointer();
+#if RAPIDJSON_USE_MEMBERSMAP
+ Map* &map = GetMap(members);
+ MapIterator* mit = GetMapIterators(map);
+ SizeType mpos = static_cast<SizeType>(&*m - members);
+ map->erase(DropMapIterator(mit[mpos]));
+#endif
+ MemberIterator last(members + (o.size - 1));
+ if (o.size > 1 && m != last) {
+#if RAPIDJSON_USE_MEMBERSMAP
+ new (&mit[mpos]) MapIterator(DropMapIterator(mit[&*last - members]));
+ mit[mpos]->second = mpos;
+#endif
+ *m = *last; // Move the last one to this place
+ }
+ else {
+ m->~Member(); // Only one left, just destroy
+ }
+ --o.size;
+ return m;
+ }
+
+ MemberIterator DoEraseMembers(ConstMemberIterator first, ConstMemberIterator last) {
+ ObjectData& o = data_.o;
+ MemberIterator beg = MemberBegin(),
+ pos = beg + (first - beg),
+ end = MemberEnd();
+#if RAPIDJSON_USE_MEMBERSMAP
+ Map* &map = GetMap(GetMembersPointer());
+ MapIterator* mit = GetMapIterators(map);
+#endif
+ for (MemberIterator itr = pos; itr != last; ++itr) {
+#if RAPIDJSON_USE_MEMBERSMAP
+ map->erase(DropMapIterator(mit[itr - beg]));
+#endif
+ itr->~Member();
+ }
+#if RAPIDJSON_USE_MEMBERSMAP
+ if (first != last) {
+ // Move remaining members/iterators
+ MemberIterator next = pos + (last - first);
+ for (MemberIterator itr = pos; next != end; ++itr, ++next) {
+ std::memcpy(static_cast<void*>(&*itr), &*next, sizeof(Member));
+ SizeType mpos = static_cast<SizeType>(itr - beg);
+ new (&mit[mpos]) MapIterator(DropMapIterator(mit[next - beg]));
+ mit[mpos]->second = mpos;
+ }
+ }
+#else
+ std::memmove(static_cast<void*>(&*pos), &*last,
+ static_cast<size_t>(end - last) * sizeof(Member));
+#endif
+ o.size -= static_cast<SizeType>(last - first);
+ return pos;
+ }
+
+ template <typename SourceAllocator>
+ void DoCopyMembers(const GenericValue<Encoding,SourceAllocator>& rhs, Allocator& allocator, bool copyConstStrings) {
+ RAPIDJSON_ASSERT(rhs.GetType() == kObjectType);
+
+ data_.f.flags = kObjectFlag;
+ SizeType count = rhs.data_.o.size;
+ Member* lm = DoAllocMembers(count, allocator);
+ const typename GenericValue<Encoding,SourceAllocator>::Member* rm = rhs.GetMembersPointer();
+#if RAPIDJSON_USE_MEMBERSMAP
+ Map* &map = GetMap(lm);
+ MapIterator* mit = GetMapIterators(map);
+#endif
+ for (SizeType i = 0; i < count; i++) {
+ new (&lm[i].name) GenericValue(rm[i].name, allocator, copyConstStrings);
+ new (&lm[i].value) GenericValue(rm[i].value, allocator, copyConstStrings);
+#if RAPIDJSON_USE_MEMBERSMAP
+ new (&mit[i]) MapIterator(map->insert(MapPair(lm[i].name.data_, i)));
+#endif
+ }
+ data_.o.size = data_.o.capacity = count;
+ SetMembersPointer(lm);
+ }
+
// Initialize this value as array with initial data, without calling destructor.
void SetArrayRaw(GenericValue* values, SizeType count, Allocator& allocator) {
data_.f.flags = kArrayFlag;
@@ -2047,9 +2415,16 @@ private:
void SetObjectRaw(Member* members, SizeType count, Allocator& allocator) {
data_.f.flags = kObjectFlag;
if (count) {
- Member* m = static_cast<Member*>(allocator.Malloc(count * sizeof(Member)));
+ Member* m = DoAllocMembers(count, allocator);
SetMembersPointer(m);
std::memcpy(static_cast<void*>(m), members, count * sizeof(Member));
+#if RAPIDJSON_USE_MEMBERSMAP
+ Map* &map = GetMap(m);
+ MapIterator* mit = GetMapIterators(map);
+ for (SizeType i = 0; i < count; i++) {
+ new (&mit[i]) MapIterator(map->insert(MapPair(m[i].name.data_, i)));
+ }
+#endif
}
else
SetMembersPointer(0);
@@ -2094,11 +2469,11 @@ private:
const SizeType len1 = GetStringLength();
const SizeType len2 = rhs.GetStringLength();
- if (len1 != len2) { return false; }
+ if(len1 != len2) { return false; }
const Ch* const str1 = GetString();
const Ch* const str2 = rhs.GetString();
- if (str1 == str2) { return true; } // fast path for constant string
+ if(str1 == str2) { return true; } // fast path for constant string
return (std::memcmp(str1, str2, sizeof(Ch) * len1) == 0);
}
@@ -2120,12 +2495,13 @@ typedef GenericValue<UTF8<> > Value;
\tparam StackAllocator Allocator for allocating memory for stack during parsing.
\warning Although GenericDocument inherits from GenericValue, the API does \b not provide any virtual functions, especially no virtual destructor. To avoid memory leaks, do not \c delete a GenericDocument object via a pointer to a GenericValue.
*/
-template <typename Encoding, typename Allocator = MemoryPoolAllocator<>, typename StackAllocator = CrtAllocator>
+template <typename Encoding, typename Allocator = RAPIDJSON_DEFAULT_ALLOCATOR, typename StackAllocator = RAPIDJSON_DEFAULT_STACK_ALLOCATOR >
class GenericDocument : public GenericValue<Encoding, Allocator> {
public:
typedef typename Encoding::Ch Ch; //!< Character type derived from Encoding.
typedef GenericValue<Encoding, Allocator> ValueType; //!< Value type of the document.
typedef Allocator AllocatorType; //!< Allocator type from template parameter.
+ typedef StackAllocator StackAllocatorType; //!< StackAllocator type from template parameter.
//! Constructor
/*! Creates an empty document of specified type.
@@ -2170,6 +2546,13 @@ public:
#endif
~GenericDocument() {
+        // Clear the ::ValueType before ownAllocator_ is destroyed: ~ValueType()
+        // runs last and may access elements or members whose storage would
+        // otherwise already have been freed by an allocator like
+        // MemoryPoolAllocator (CrtAllocator does not free its data when
+        // destroyed, but MemoryPoolAllocator does).
+ if (ownAllocator_) {
+ ValueType::SetNull();
+ }
Destroy();
}
@@ -2505,6 +2888,7 @@ private:
//! GenericDocument with UTF8 encoding
typedef GenericDocument<UTF8<> > Document;
+
//! Helper class for accessing Value of array type.
/*!
Instance of this helper class is obtained by \c GenericValue::GetArray().
@@ -2529,6 +2913,7 @@ public:
GenericArray& operator=(const GenericArray& rhs) { value_ = rhs.value_; return *this; }
~GenericArray() {}
+ operator ValueType&() const { return value_; }
SizeType Size() const { return value_.Size(); }
SizeType Capacity() const { return value_.Capacity(); }
bool Empty() const { return value_.Empty(); }
@@ -2584,6 +2969,7 @@ public:
GenericObject& operator=(const GenericObject& rhs) { value_ = rhs.value_; return *this; }
~GenericObject() {}
+ operator ValueType&() const { return value_; }
SizeType MemberCount() const { return value_.MemberCount(); }
SizeType MemberCapacity() const { return value_.MemberCapacity(); }
bool ObjectEmpty() const { return value_.ObjectEmpty(); }
@@ -2649,4 +3035,9 @@ private:
RAPIDJSON_NAMESPACE_END
RAPIDJSON_DIAG_POP
+#ifdef RAPIDJSON_WINDOWS_GETOBJECT_WORKAROUND_APPLIED
+#pragma pop_macro("GetObject")
+#undef RAPIDJSON_WINDOWS_GETOBJECT_WORKAROUND_APPLIED
+#endif
+
#endif // RAPIDJSON_DOCUMENT_H_
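
A minimal usage sketch for the member-map machinery added above (a sketch, not part of the diff; the JSON text and member names are made up). Defining RAPIDJSON_USE_MEMBERSMAP before the first include swaps the linear DoFindMember scan in the #else branch for the std::multimap lookup, at the cost of the extra map and iterator block each object then carries:

    #define RAPIDJSON_USE_MEMBERSMAP 1
    #include "rapidjson/document.h"
    #include <cstdio>

    int main() {
        rapidjson::Document d;
        d.Parse("{\"a\":1,\"b\":2,\"c\":3}");
        // With the map enabled, FindMember resolves through the per-object
        // multimap (O(log n)) instead of scanning members one by one (O(n)).
        rapidjson::Value::ConstMemberIterator it = d.FindMember("b");
        if (it != d.MemberEnd())
            std::printf("b = %d\n", it->value.GetInt());
        return 0;
    }
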
diff --git a/src/native/external/rapidjson/encodedstream.h b/src/native/external/rapidjson/encodedstream.h
index 223601c0599b..cf046b89235f 100644
--- a/src/native/external/rapidjson/encodedstream.h
+++ b/src/native/external/rapidjson/encodedstream.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
diff --git a/src/native/external/rapidjson/encodings.h b/src/native/external/rapidjson/encodings.h
index 0b2446795015..50ad18bdc08c 100644
--- a/src/native/external/rapidjson/encodings.h
+++ b/src/native/external/rapidjson/encodings.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
diff --git a/src/native/external/rapidjson/error/en.h b/src/native/external/rapidjson/error/en.h
index 2db838bff239..c87b04eb133e 100644
--- a/src/native/external/rapidjson/error/en.h
+++ b/src/native/external/rapidjson/error/en.h
@@ -1,15 +1,15 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
-//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_ERROR_EN_H_
@@ -39,13 +39,13 @@ inline const RAPIDJSON_ERROR_CHARTYPE* GetParseError_En(ParseErrorCode parseErro
case kParseErrorDocumentEmpty: return RAPIDJSON_ERROR_STRING("The document is empty.");
case kParseErrorDocumentRootNotSingular: return RAPIDJSON_ERROR_STRING("The document root must not be followed by other values.");
-
+
case kParseErrorValueInvalid: return RAPIDJSON_ERROR_STRING("Invalid value.");
-
+
case kParseErrorObjectMissName: return RAPIDJSON_ERROR_STRING("Missing a name for object member.");
case kParseErrorObjectMissColon: return RAPIDJSON_ERROR_STRING("Missing a colon after a name of object member.");
case kParseErrorObjectMissCommaOrCurlyBracket: return RAPIDJSON_ERROR_STRING("Missing a comma or '}' after an object member.");
-
+
case kParseErrorArrayMissCommaOrSquareBracket: return RAPIDJSON_ERROR_STRING("Missing a comma or ']' after an array element.");
case kParseErrorStringUnicodeEscapeInvalidHex: return RAPIDJSON_ERROR_STRING("Incorrect hex digit after \\u escape in string.");
@@ -65,6 +65,108 @@ inline const RAPIDJSON_ERROR_CHARTYPE* GetParseError_En(ParseErrorCode parseErro
}
}
+//! Maps error code of validation into error message.
+/*!
+ \ingroup RAPIDJSON_ERRORS
+ \param validateErrorCode Error code obtained from validator.
+ \return the error message.
+ \note User can make a copy of this function for localization.
+ Using switch-case is safer for future modification of error codes.
+*/
+inline const RAPIDJSON_ERROR_CHARTYPE* GetValidateError_En(ValidateErrorCode validateErrorCode) {
+ switch (validateErrorCode) {
+ case kValidateErrors: return RAPIDJSON_ERROR_STRING("One or more validation errors have occurred");
+ case kValidateErrorNone: return RAPIDJSON_ERROR_STRING("No error.");
+
+ case kValidateErrorMultipleOf: return RAPIDJSON_ERROR_STRING("Number '%actual' is not a multiple of the 'multipleOf' value '%expected'.");
+ case kValidateErrorMaximum: return RAPIDJSON_ERROR_STRING("Number '%actual' is greater than the 'maximum' value '%expected'.");
+ case kValidateErrorExclusiveMaximum: return RAPIDJSON_ERROR_STRING("Number '%actual' is greater than or equal to the 'exclusiveMaximum' value '%expected'.");
+ case kValidateErrorMinimum: return RAPIDJSON_ERROR_STRING("Number '%actual' is less than the 'minimum' value '%expected'.");
+ case kValidateErrorExclusiveMinimum: return RAPIDJSON_ERROR_STRING("Number '%actual' is less than or equal to the 'exclusiveMinimum' value '%expected'.");
+
+ case kValidateErrorMaxLength: return RAPIDJSON_ERROR_STRING("String '%actual' is longer than the 'maxLength' value '%expected'.");
+ case kValidateErrorMinLength: return RAPIDJSON_ERROR_STRING("String '%actual' is shorter than the 'minLength' value '%expected'.");
+ case kValidateErrorPattern: return RAPIDJSON_ERROR_STRING("String '%actual' does not match the 'pattern' regular expression.");
+
+ case kValidateErrorMaxItems: return RAPIDJSON_ERROR_STRING("Array of length '%actual' is longer than the 'maxItems' value '%expected'.");
+ case kValidateErrorMinItems: return RAPIDJSON_ERROR_STRING("Array of length '%actual' is shorter than the 'minItems' value '%expected'.");
+ case kValidateErrorUniqueItems: return RAPIDJSON_ERROR_STRING("Array has duplicate items at indices '%duplicates' but 'uniqueItems' is true.");
+ case kValidateErrorAdditionalItems: return RAPIDJSON_ERROR_STRING("Array has an additional item at index '%disallowed' that is not allowed by the schema.");
+
+ case kValidateErrorMaxProperties: return RAPIDJSON_ERROR_STRING("Object has '%actual' members which is more than 'maxProperties' value '%expected'.");
+ case kValidateErrorMinProperties: return RAPIDJSON_ERROR_STRING("Object has '%actual' members which is less than 'minProperties' value '%expected'.");
+ case kValidateErrorRequired: return RAPIDJSON_ERROR_STRING("Object is missing the following members required by the schema: '%missing'.");
+ case kValidateErrorAdditionalProperties: return RAPIDJSON_ERROR_STRING("Object has an additional member '%disallowed' that is not allowed by the schema.");
+ case kValidateErrorPatternProperties: return RAPIDJSON_ERROR_STRING("Object has 'patternProperties' that are not allowed by the schema.");
+ case kValidateErrorDependencies: return RAPIDJSON_ERROR_STRING("Object has missing property or schema dependencies, refer to following errors.");
+
+ case kValidateErrorEnum: return RAPIDJSON_ERROR_STRING("Property has a value that is not one of its allowed enumerated values.");
+ case kValidateErrorType: return RAPIDJSON_ERROR_STRING("Property has a type '%actual' that is not in the following list: '%expected'.");
+
+ case kValidateErrorOneOf: return RAPIDJSON_ERROR_STRING("Property did not match any of the sub-schemas specified by 'oneOf', refer to following errors.");
+ case kValidateErrorOneOfMatch: return RAPIDJSON_ERROR_STRING("Property matched more than one of the sub-schemas specified by 'oneOf', indices '%matches'.");
+ case kValidateErrorAllOf: return RAPIDJSON_ERROR_STRING("Property did not match all of the sub-schemas specified by 'allOf', refer to following errors.");
+ case kValidateErrorAnyOf: return RAPIDJSON_ERROR_STRING("Property did not match any of the sub-schemas specified by 'anyOf', refer to following errors.");
+ case kValidateErrorNot: return RAPIDJSON_ERROR_STRING("Property matched the sub-schema specified by 'not'.");
+
+ case kValidateErrorReadOnly: return RAPIDJSON_ERROR_STRING("Property is read-only but has been provided when validation is for writing.");
+ case kValidateErrorWriteOnly: return RAPIDJSON_ERROR_STRING("Property is write-only but has been provided when validation is for reading.");
+
+ default: return RAPIDJSON_ERROR_STRING("Unknown error.");
+ }
+}
+
+//! Maps error code of schema document compilation into error message.
+/*!
+ \ingroup RAPIDJSON_ERRORS
+ \param schemaErrorCode Error code obtained from compiling the schema document.
+ \return the error message.
+ \note User can make a copy of this function for localization.
+ Using switch-case is safer for future modification of error codes.
+*/
+inline const RAPIDJSON_ERROR_CHARTYPE* GetSchemaError_En(SchemaErrorCode schemaErrorCode) {
+ switch (schemaErrorCode) {
+ case kSchemaErrorNone: return RAPIDJSON_ERROR_STRING("No error.");
+
+ case kSchemaErrorStartUnknown: return RAPIDJSON_ERROR_STRING("Pointer '%value' to start of schema does not resolve to a location in the document.");
+ case kSchemaErrorRefPlainName: return RAPIDJSON_ERROR_STRING("$ref fragment '%value' must be a JSON pointer.");
+ case kSchemaErrorRefInvalid: return RAPIDJSON_ERROR_STRING("$ref must not be an empty string.");
+ case kSchemaErrorRefPointerInvalid: return RAPIDJSON_ERROR_STRING("$ref fragment '%value' is not a valid JSON pointer at offset '%offset'.");
+ case kSchemaErrorRefUnknown: return RAPIDJSON_ERROR_STRING("$ref '%value' does not resolve to a location in the target document.");
+ case kSchemaErrorRefCyclical: return RAPIDJSON_ERROR_STRING("$ref '%value' is cyclical.");
+ case kSchemaErrorRefNoRemoteProvider: return RAPIDJSON_ERROR_STRING("$ref is remote but there is no remote provider.");
+ case kSchemaErrorRefNoRemoteSchema: return RAPIDJSON_ERROR_STRING("$ref '%value' is remote but the remote provider did not return a schema.");
+ case kSchemaErrorRegexInvalid: return RAPIDJSON_ERROR_STRING("Invalid regular expression '%value' in 'pattern' or 'patternProperties'.");
+ case kSchemaErrorSpecUnknown: return RAPIDJSON_ERROR_STRING("JSON schema draft or OpenAPI version is not recognized.");
+ case kSchemaErrorSpecUnsupported: return RAPIDJSON_ERROR_STRING("JSON schema draft or OpenAPI version is not supported.");
+ case kSchemaErrorSpecIllegal: return RAPIDJSON_ERROR_STRING("Both JSON schema draft and OpenAPI version found in document.");
+ case kSchemaErrorReadOnlyAndWriteOnly: return RAPIDJSON_ERROR_STRING("Property must not be both 'readOnly' and 'writeOnly'.");
+
+ default: return RAPIDJSON_ERROR_STRING("Unknown error.");
+ }
+}
+
+//! Maps error code of pointer parse into error message.
+/*!
+ \ingroup RAPIDJSON_ERRORS
+ \param pointerParseErrorCode Error code obtained from pointer parse.
+ \return the error message.
+ \note User can make a copy of this function for localization.
+ Using switch-case is safer for future modification of error codes.
+*/
+inline const RAPIDJSON_ERROR_CHARTYPE* GetPointerParseError_En(PointerParseErrorCode pointerParseErrorCode) {
+ switch (pointerParseErrorCode) {
+ case kPointerParseErrorNone: return RAPIDJSON_ERROR_STRING("No error.");
+
+ case kPointerParseErrorTokenMustBeginWithSolidus: return RAPIDJSON_ERROR_STRING("A token must begin with a '/'.");
+ case kPointerParseErrorInvalidEscape: return RAPIDJSON_ERROR_STRING("Invalid escape.");
+ case kPointerParseErrorInvalidPercentEncoding: return RAPIDJSON_ERROR_STRING("Invalid percent encoding in URI fragment.");
+ case kPointerParseErrorCharacterMustPercentEncode: return RAPIDJSON_ERROR_STRING("A character must be percent encoded in a URI fragment.");
+
+ default: return RAPIDJSON_ERROR_STRING("Unknown error.");
+ }
+}
+
RAPIDJSON_NAMESPACE_END
#ifdef __clang__
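
A hedged sketch of how the new GetValidateError_En pairs with the schema validator. It assumes rapidjson/schema.h from the same upstream drop and the GetInvalidSchemaCode() accessor referenced in the doc comment above; the schema and instance JSON are made up:

    #include "rapidjson/schema.h"
    #include "rapidjson/error/en.h"
    #include <cstdio>

    int main() {
        rapidjson::Document schemaDoc, instance;
        schemaDoc.Parse("{\"type\":\"object\",\"required\":[\"id\"]}");
        instance.Parse("{}");

        rapidjson::SchemaDocument schema(schemaDoc);
        rapidjson::SchemaValidator validator(schema);
        if (!instance.Accept(validator))
            // The returned code maps onto the message table added above.
            std::printf("%s\n",
                rapidjson::GetValidateError_En(validator.GetInvalidSchemaCode()));
        return 0;
    }
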
diff --git a/src/native/external/rapidjson/error/error.h b/src/native/external/rapidjson/error/error.h
index 9311d2f03bff..cae345db36d2 100644
--- a/src/native/external/rapidjson/error/error.h
+++ b/src/native/external/rapidjson/error/error.h
@@ -1,15 +1,15 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
-//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_ERROR_ERROR_H_
@@ -42,7 +42,7 @@ RAPIDJSON_DIAG_OFF(padded)
///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_ERROR_STRING
-//! Macro for converting string literial to \ref RAPIDJSON_ERROR_CHARTYPE[].
+//! Macro for converting string literal to \ref RAPIDJSON_ERROR_CHARTYPE[].
/*! \ingroup RAPIDJSON_ERRORS
By default this conversion macro does nothing.
On Windows, user can define this macro as \c _T(x) for supporting both
@@ -152,6 +152,130 @@ private:
*/
typedef const RAPIDJSON_ERROR_CHARTYPE* (*GetParseErrorFunc)(ParseErrorCode);
+///////////////////////////////////////////////////////////////////////////////
+// ValidateErrorCode
+
+//! Error codes when validating.
+/*! \ingroup RAPIDJSON_ERRORS
+ \see GenericSchemaValidator
+*/
+enum ValidateErrorCode {
+    kValidateErrors = -1,                       //!< Top level error code when kValidateContinueOnErrorsFlag is set.
+ kValidateErrorNone = 0, //!< No error.
+
+ kValidateErrorMultipleOf, //!< Number is not a multiple of the 'multipleOf' value.
+ kValidateErrorMaximum, //!< Number is greater than the 'maximum' value.
+    kValidateErrorExclusiveMaximum,             //!< Number is greater than or equal to the 'exclusiveMaximum' value.
+ kValidateErrorMinimum, //!< Number is less than the 'minimum' value.
+    kValidateErrorExclusiveMinimum,             //!< Number is less than or equal to the 'exclusiveMinimum' value.
+
+ kValidateErrorMaxLength, //!< String is longer than the 'maxLength' value.
+    kValidateErrorMinLength,                    //!< String is shorter than the 'minLength' value.
+ kValidateErrorPattern, //!< String does not match the 'pattern' regular expression.
+
+ kValidateErrorMaxItems, //!< Array is longer than the 'maxItems' value.
+ kValidateErrorMinItems, //!< Array is shorter than the 'minItems' value.
+ kValidateErrorUniqueItems, //!< Array has duplicate items but 'uniqueItems' is true.
+ kValidateErrorAdditionalItems, //!< Array has additional items that are not allowed by the schema.
+
+ kValidateErrorMaxProperties, //!< Object has more members than 'maxProperties' value.
+    kValidateErrorMinProperties,                //!< Object has fewer members than the 'minProperties' value.
+ kValidateErrorRequired, //!< Object is missing one or more members required by the schema.
+ kValidateErrorAdditionalProperties, //!< Object has additional members that are not allowed by the schema.
+ kValidateErrorPatternProperties, //!< See other errors.
+ kValidateErrorDependencies, //!< Object has missing property or schema dependencies.
+
+ kValidateErrorEnum, //!< Property has a value that is not one of its allowed enumerated values.
+ kValidateErrorType, //!< Property has a type that is not allowed by the schema.
+
+ kValidateErrorOneOf, //!< Property did not match any of the sub-schemas specified by 'oneOf'.
+ kValidateErrorOneOfMatch, //!< Property matched more than one of the sub-schemas specified by 'oneOf'.
+ kValidateErrorAllOf, //!< Property did not match all of the sub-schemas specified by 'allOf'.
+ kValidateErrorAnyOf, //!< Property did not match any of the sub-schemas specified by 'anyOf'.
+ kValidateErrorNot, //!< Property matched the sub-schema specified by 'not'.
+
+    kValidateErrorReadOnly,                     //!< Property is read-only but has been provided when validation is for writing.
+    kValidateErrorWriteOnly                     //!< Property is write-only but has been provided when validation is for reading.
+};
+
+//! Function pointer type of GetValidateError().
+/*! \ingroup RAPIDJSON_ERRORS
+
+ This is the prototype for \c GetValidateError_X(), where \c X is a locale.
+ User can dynamically change locale in runtime, e.g.:
+\code
+ GetValidateErrorFunc GetValidateError = GetValidateError_En; // or whatever
+ const RAPIDJSON_ERROR_CHARTYPE* s = GetValidateError(validator.GetInvalidSchemaCode());
+\endcode
+*/
+typedef const RAPIDJSON_ERROR_CHARTYPE* (*GetValidateErrorFunc)(ValidateErrorCode);
+
+///////////////////////////////////////////////////////////////////////////////
+// SchemaErrorCode
+
+//! Error codes when compiling a schema document.
+/*! \ingroup RAPIDJSON_ERRORS
+ \see GenericSchemaValidator
+*/
+enum SchemaErrorCode {
+ kSchemaErrorNone = 0, //!< No error.
+
+ kSchemaErrorStartUnknown, //!< Pointer to start of schema does not resolve to a location in the document
+ kSchemaErrorRefPlainName, //!< $ref fragment must be a JSON pointer
+ kSchemaErrorRefInvalid, //!< $ref must not be an empty string
+ kSchemaErrorRefPointerInvalid, //!< $ref fragment is not a valid JSON pointer at offset
+ kSchemaErrorRefUnknown, //!< $ref does not resolve to a location in the target document
+ kSchemaErrorRefCyclical, //!< $ref is cyclical
+ kSchemaErrorRefNoRemoteProvider, //!< $ref is remote but there is no remote provider
+ kSchemaErrorRefNoRemoteSchema, //!< $ref is remote but the remote provider did not return a schema
+ kSchemaErrorRegexInvalid, //!< Invalid regular expression in 'pattern' or 'patternProperties'
+ kSchemaErrorSpecUnknown, //!< JSON schema draft or OpenAPI version is not recognized
+ kSchemaErrorSpecUnsupported, //!< JSON schema draft or OpenAPI version is not supported
+ kSchemaErrorSpecIllegal, //!< Both JSON schema draft and OpenAPI version found in document
+ kSchemaErrorReadOnlyAndWriteOnly //!< Property must not be both 'readOnly' and 'writeOnly'
+};
+
+//! Function pointer type of GetSchemaError().
+/*! \ingroup RAPIDJSON_ERRORS
+
+ This is the prototype for \c GetSchemaError_X(), where \c X is a locale.
+ User can dynamically change locale in runtime, e.g.:
+\code
+ GetSchemaErrorFunc GetSchemaError = GetSchemaError_En; // or whatever
+ const RAPIDJSON_ERROR_CHARTYPE* s = GetSchemaError(validator.GetInvalidSchemaCode());
+\endcode
+*/
+typedef const RAPIDJSON_ERROR_CHARTYPE* (*GetSchemaErrorFunc)(SchemaErrorCode);
+
+///////////////////////////////////////////////////////////////////////////////
+// PointerParseErrorCode
+
+//! Error code of JSON pointer parsing.
+/*! \ingroup RAPIDJSON_ERRORS
+ \see GenericPointer::GenericPointer, GenericPointer::GetParseErrorCode
+*/
+enum PointerParseErrorCode {
+ kPointerParseErrorNone = 0, //!< The parse is successful
+
+ kPointerParseErrorTokenMustBeginWithSolidus, //!< A token must begin with a '/'
+ kPointerParseErrorInvalidEscape, //!< Invalid escape
+ kPointerParseErrorInvalidPercentEncoding, //!< Invalid percent encoding in URI fragment
+    kPointerParseErrorCharacterMustPercentEncode //!< A character must be percent encoded in a URI fragment
+};
+
+//! Function pointer type of GetPointerParseError().
+/*! \ingroup RAPIDJSON_ERRORS
+
+ This is the prototype for \c GetPointerParseError_X(), where \c X is a locale.
+ User can dynamically change locale in runtime, e.g.:
+\code
+ GetPointerParseErrorFunc GetPointerParseError = GetPointerParseError_En; // or whatever
+ const RAPIDJSON_ERROR_CHARTYPE* s = GetPointerParseError(pointer.GetParseErrorCode());
+\endcode
+*/
+typedef const RAPIDJSON_ERROR_CHARTYPE* (*GetPointerParseErrorFunc)(PointerParseErrorCode);
+
+
RAPIDJSON_NAMESPACE_END
#ifdef __clang__
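
The GetParseErrorFunc/GetValidateErrorFunc/GetSchemaErrorFunc typedefs exist so callers can swap the message table at runtime. A small sketch with the long-standing parse-error variant (only the _En table ships; a GetParseError_Fr, say, would be user-supplied):

    #include "rapidjson/document.h"
    #include "rapidjson/error/en.h"
    #include <cstdio>

    int main() {
        rapidjson::GetParseErrorFunc getError = rapidjson::GetParseError_En;
        rapidjson::Document d;
        d.Parse("{bad json");
        if (d.HasParseError())
            // GetParseError() yields the code; the function pointer, the text.
            std::printf("offset %zu: %s\n",
                d.GetErrorOffset(), getError(d.GetParseError()));
        return 0;
    }
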
diff --git a/src/native/external/rapidjson/filereadstream.h b/src/native/external/rapidjson/filereadstream.h
deleted file mode 100644
index 6b343707ade0..000000000000
--- a/src/native/external/rapidjson/filereadstream.h
+++ /dev/null
@@ -1,99 +0,0 @@
-// Tencent is pleased to support the open source community by making RapidJSON available.
-//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
-//
-// Licensed under the MIT License (the "License"); you may not use this file except
-// in compliance with the License. You may obtain a copy of the License at
-//
-// http://opensource.org/licenses/MIT
-//
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-#ifndef RAPIDJSON_FILEREADSTREAM_H_
-#define RAPIDJSON_FILEREADSTREAM_H_
-
-#include "stream.h"
-#include <cstdio>
-
-#ifdef __clang__
-RAPIDJSON_DIAG_PUSH
-RAPIDJSON_DIAG_OFF(padded)
-RAPIDJSON_DIAG_OFF(unreachable-code)
-RAPIDJSON_DIAG_OFF(missing-noreturn)
-#endif
-
-RAPIDJSON_NAMESPACE_BEGIN
-
-//! File byte stream for input using fread().
-/*!
- \note implements Stream concept
-*/
-class FileReadStream {
-public:
- typedef char Ch; //!< Character type (byte).
-
- //! Constructor.
- /*!
- \param fp File pointer opened for read.
- \param buffer user-supplied buffer.
- \param bufferSize size of buffer in bytes. Must >=4 bytes.
- */
- FileReadStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferSize_(bufferSize), bufferLast_(0), current_(buffer_), readCount_(0), count_(0), eof_(false) {
- RAPIDJSON_ASSERT(fp_ != 0);
- RAPIDJSON_ASSERT(bufferSize >= 4);
- Read();
- }
-
- Ch Peek() const { return *current_; }
- Ch Take() { Ch c = *current_; Read(); return c; }
- size_t Tell() const { return count_ + static_cast<size_t>(current_ - buffer_); }
-
- // Not implemented
- void Put(Ch) { RAPIDJSON_ASSERT(false); }
- void Flush() { RAPIDJSON_ASSERT(false); }
- Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
- size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
-
- // For encoding detection only.
- const Ch* Peek4() const {
- return (current_ + 4 - !eof_ <= bufferLast_) ? current_ : 0;
- }
-
-private:
- void Read() {
- if (current_ < bufferLast_)
- ++current_;
- else if (!eof_) {
- count_ += readCount_;
- readCount_ = std::fread(buffer_, 1, bufferSize_, fp_);
- bufferLast_ = buffer_ + readCount_ - 1;
- current_ = buffer_;
-
- if (readCount_ < bufferSize_) {
- buffer_[readCount_] = '\0';
- ++bufferLast_;
- eof_ = true;
- }
- }
- }
-
- std::FILE* fp_;
- Ch *buffer_;
- size_t bufferSize_;
- Ch *bufferLast_;
- Ch *current_;
- size_t readCount_;
- size_t count_; //!< Number of characters read
- bool eof_;
-};
-
-RAPIDJSON_NAMESPACE_END
-
-#ifdef __clang__
-RAPIDJSON_DIAG_POP
-#endif
-
-#endif // RAPIDJSON_FILESTREAM_H_
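
For reference, the canonical upstream usage of the stream deleted above; with filereadstream.h gone from the vendored copy, code in this tree would have to read the file itself and parse from memory instead. The path and buffer size are illustrative, and the include no longer resolves here:

    #include "rapidjson/document.h"
    #include "rapidjson/filereadstream.h"  // removed from the vendored copy
    #include <cstdio>

    int main() {
        std::FILE* fp = std::fopen("data.json", "rb");
        if (!fp) return 1;
        char buffer[65536];
        rapidjson::FileReadStream is(fp, buffer, sizeof(buffer));
        rapidjson::Document d;
        d.ParseStream(is);  // pulls bytes through the Read()/Take() logic above
        std::fclose(fp);
        return d.HasParseError() ? 1 : 0;
    }
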
diff --git a/src/native/external/rapidjson/filewritestream.h b/src/native/external/rapidjson/filewritestream.h
deleted file mode 100644
index 8b48fee197c4..000000000000
--- a/src/native/external/rapidjson/filewritestream.h
+++ /dev/null
@@ -1,104 +0,0 @@
-// Tencent is pleased to support the open source community by making RapidJSON available.
-//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
-//
-// Licensed under the MIT License (the "License"); you may not use this file except
-// in compliance with the License. You may obtain a copy of the License at
-//
-// http://opensource.org/licenses/MIT
-//
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-#ifndef RAPIDJSON_FILEWRITESTREAM_H_
-#define RAPIDJSON_FILEWRITESTREAM_H_
-
-#include "stream.h"
-#include <cstdio>
-
-#ifdef __clang__
-RAPIDJSON_DIAG_PUSH
-RAPIDJSON_DIAG_OFF(unreachable-code)
-#endif
-
-RAPIDJSON_NAMESPACE_BEGIN
-
-//! Wrapper of C file stream for output using fwrite().
-/*!
- \note implements Stream concept
-*/
-class FileWriteStream {
-public:
- typedef char Ch; //!< Character type. Only support char.
-
- FileWriteStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferEnd_(buffer + bufferSize), current_(buffer_) {
- RAPIDJSON_ASSERT(fp_ != 0);
- }
-
- void Put(char c) {
- if (current_ >= bufferEnd_)
- Flush();
-
- *current_++ = c;
- }
-
- void PutN(char c, size_t n) {
- size_t avail = static_cast<size_t>(bufferEnd_ - current_);
- while (n > avail) {
- std::memset(current_, c, avail);
- current_ += avail;
- Flush();
- n -= avail;
- avail = static_cast<size_t>(bufferEnd_ - current_);
- }
-
- if (n > 0) {
- std::memset(current_, c, n);
- current_ += n;
- }
- }
-
- void Flush() {
- if (current_ != buffer_) {
- size_t result = std::fwrite(buffer_, 1, static_cast<size_t>(current_ - buffer_), fp_);
- if (result < static_cast<size_t>(current_ - buffer_)) {
- // failure deliberately ignored at this time
- // added to avoid warn_unused_result build errors
- }
- current_ = buffer_;
- }
- }
-
- // Not implemented
- char Peek() const { RAPIDJSON_ASSERT(false); return 0; }
- char Take() { RAPIDJSON_ASSERT(false); return 0; }
- size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; }
- char* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
- size_t PutEnd(char*) { RAPIDJSON_ASSERT(false); return 0; }
-
-private:
- // Prohibit copy constructor & assignment operator.
- FileWriteStream(const FileWriteStream&);
- FileWriteStream& operator=(const FileWriteStream&);
-
- std::FILE* fp_;
- char *buffer_;
- char *bufferEnd_;
- char *current_;
-};
-
-//! Implement specialized version of PutN() with memset() for better performance.
-template<>
-inline void PutN(FileWriteStream& stream, char c, size_t n) {
- stream.PutN(c, n);
-}
-
-RAPIDJSON_NAMESPACE_END
-
-#ifdef __clang__
-RAPIDJSON_DIAG_POP
-#endif
-
-#endif // RAPIDJSON_FILESTREAM_H_
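
Likewise for the deleted write stream, the canonical upstream pattern, shown only as a reference for the Put()/PutN()/Flush() logic above (output path illustrative; assumes writer.h is still vendored):

    #include "rapidjson/document.h"
    #include "rapidjson/filewritestream.h"  // removed from the vendored copy
    #include "rapidjson/writer.h"
    #include <cstdio>

    int main() {
        rapidjson::Document d;
        d.Parse("{\"a\":1}");
        std::FILE* fp = std::fopen("out.json", "wb");
        if (!fp) return 1;
        char buffer[65536];
        rapidjson::FileWriteStream os(fp, buffer, sizeof(buffer));
        rapidjson::Writer<rapidjson::FileWriteStream> writer(os);
        d.Accept(writer);  // Put() buffers bytes; Flush() issues the fwrite()
        os.Flush();
        std::fclose(fp);
        return 0;
    }
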
diff --git a/src/native/external/rapidjson/fwd.h b/src/native/external/rapidjson/fwd.h
index e8104e841bcd..d62f77f0ecfa 100644
--- a/src/native/external/rapidjson/fwd.h
+++ b/src/native/external/rapidjson/fwd.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
@@ -102,7 +102,7 @@ class PrettyWriter;
// document.h
template <typename Encoding, typename Allocator>
-struct GenericMember;
+class GenericMember;
template <bool Const, typename Encoding, typename Allocator>
class GenericMemberIterator;
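
Why the one-word change above matters: a forward declaration whose class-key disagrees with the definition is well-formed C++, but MSVC flags it as warning C4099, so fwd.h is brought in line with document.h, where GenericMember is now presumably declared with the class key. A self-contained repro of the mismatch:

    struct Widget;   // forward-declared with 'struct'...
    class Widget {   // ...defined with 'class': MSVC emits C4099, though the
    public:          // program is still well-formed
        int id;
    };

    int main() { Widget w; w.id = 7; return w.id; }
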
diff --git a/src/native/external/rapidjson/internal/biginteger.h b/src/native/external/rapidjson/internal/biginteger.h
index a31c8a88d6eb..4930043dc7c5 100644
--- a/src/native/external/rapidjson/internal/biginteger.h
+++ b/src/native/external/rapidjson/internal/biginteger.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
@@ -17,9 +17,13 @@
#include "../rapidjson.h"
-#if defined(_MSC_VER) && !__INTEL_COMPILER && defined(_M_AMD64)
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && defined(_M_AMD64)
#include <intrin.h> // for _umul128
+#if !defined(_ARM64EC_)
#pragma intrinsic(_umul128)
+#else
+#pragma comment(lib,"softintrin")
+#endif
#endif
RAPIDJSON_NAMESPACE_BEGIN
@@ -37,7 +41,8 @@ public:
digits_[0] = u;
}
- BigInteger(const char* decimals, size_t length) : count_(1) {
+ template<typename Ch>
+ BigInteger(const Ch* decimals, size_t length) : count_(1) {
RAPIDJSON_ASSERT(length > 0);
digits_[0] = 0;
size_t i = 0;
@@ -221,7 +226,8 @@ public:
bool IsZero() const { return count_ == 1 && digits_[0] == 0; }
private:
- void AppendDecimal64(const char* begin, const char* end) {
+ template<typename Ch>
+ void AppendDecimal64(const Ch* begin, const Ch* end) {
uint64_t u = ParseUint64(begin, end);
if (IsZero())
*this = u;
@@ -236,11 +242,12 @@ private:
digits_[count_++] = digit;
}
- static uint64_t ParseUint64(const char* begin, const char* end) {
+ template<typename Ch>
+ static uint64_t ParseUint64(const Ch* begin, const Ch* end) {
uint64_t r = 0;
- for (const char* p = begin; p != end; ++p) {
- RAPIDJSON_ASSERT(*p >= '0' && *p <= '9');
- r = r * 10u + static_cast<unsigned>(*p - '0');
+ for (const Ch* p = begin; p != end; ++p) {
+ RAPIDJSON_ASSERT(*p >= Ch('0') && *p <= Ch('9'));
+ r = r * 10u + static_cast<unsigned>(*p - Ch('0'));
}
return r;
}
@@ -252,7 +259,7 @@ private:
if (low < k)
(*outHigh)++;
return low;
-#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__)
+#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__)
__extension__ typedef unsigned __int128 uint128;
uint128 p = static_cast<uint128>(a) * static_cast<uint128>(b);
p += k;
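
The biginteger.h hunks make digit parsing generic over the character type, so UTF-16/UTF-32 number strings can feed BigInteger without transcoding, and they wrap the bare __GNUC__ test in defined() so non-GCC preprocessors and -Wundef builds stay quiet. A standalone sketch that mirrors (rather than reproduces) the templated helper:

    #include <cassert>
    #include <cstdint>

    template <typename Ch>
    uint64_t ParseDigits(const Ch* begin, const Ch* end) {
        uint64_t r = 0;
        for (const Ch* p = begin; p != end; ++p) {
            assert(*p >= Ch('0') && *p <= Ch('9'));  // digits only, as upstream asserts
            r = r * 10u + static_cast<unsigned>(*p - Ch('0'));
        }
        return r;
    }

    int main() {
        const char narrow[] = "12345";
        const wchar_t wide[] = L"12345";
        // Same value regardless of the character width of the source text.
        return ParseDigits(narrow, narrow + 5) == ParseDigits(wide, wide + 5) ? 0 : 1;
    }
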
diff --git a/src/native/external/rapidjson/internal/clzll.h b/src/native/external/rapidjson/internal/clzll.h
new file mode 100644
index 000000000000..8fc5118aa47b
--- /dev/null
+++ b/src/native/external/rapidjson/internal/clzll.h
@@ -0,0 +1,71 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_CLZLL_H_
+#define RAPIDJSON_CLZLL_H_
+
+#include "../rapidjson.h"
+
+#if defined(_MSC_VER) && !defined(UNDER_CE)
+#include <intrin.h>
+#if defined(_WIN64)
+#pragma intrinsic(_BitScanReverse64)
+#else
+#pragma intrinsic(_BitScanReverse)
+#endif
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+inline uint32_t clzll(uint64_t x) {
+ // Passing 0 to __builtin_clzll is UB in GCC and results in an
+ // infinite loop in the software implementation.
+ RAPIDJSON_ASSERT(x != 0);
+
+#if defined(_MSC_VER) && !defined(UNDER_CE)
+ unsigned long r = 0;
+#if defined(_WIN64)
+ _BitScanReverse64(&r, x);
+#else
+ // Scan the high 32 bits.
+ if (_BitScanReverse(&r, static_cast<uint32_t>(x >> 32)))
+ return 63 - (r + 32);
+
+ // Scan the low 32 bits.
+ _BitScanReverse(&r, static_cast<uint32_t>(x & 0xFFFFFFFF));
+#endif // _WIN64
+
+ return 63 - r;
+#elif (defined(__GNUC__) && __GNUC__ >= 4) || RAPIDJSON_HAS_BUILTIN(__builtin_clzll)
+ // __builtin_clzll wrapper
+ return static_cast<uint32_t>(__builtin_clzll(x));
+#else
+ // naive version
+ uint32_t r = 0;
+ while (!(x & (static_cast<uint64_t>(1) << 63))) {
+ x <<= 1;
+ ++r;
+ }
+
+ return r;
+#endif // _MSC_VER
+}
+
+#define RAPIDJSON_CLZLL RAPIDJSON_NAMESPACE::internal::clzll
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_CLZLL_H_
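
A quick sanity sketch for the new header's contract: for nonzero x, clzll(x) returns the number of leading zero bits, whichever of the three paths (MSVC intrinsics, __builtin_clzll, shift loop) ends up compiled. The checks below exercise the portable loop form:

    #include <cassert>
    #include <cstdint>

    // The fallback path from clzll.h: shift left until bit 63 is set.
    static uint32_t clzll_naive(uint64_t x) {
        uint32_t r = 0;
        while (!(x & (static_cast<uint64_t>(1) << 63))) { x <<= 1; ++r; }
        return r;
    }

    int main() {
        assert(clzll_naive(1) == 63);                  // only the lowest bit set
        assert(clzll_naive(uint64_t(1) << 63) == 0);   // only the highest bit set
        assert(clzll_naive(0x00F0000000000000ull) == 8);
        return 0;
    }
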
diff --git a/src/native/external/rapidjson/internal/diyfp.h b/src/native/external/rapidjson/internal/diyfp.h
index b6c2cf5618d4..1f60fb60ca04 100644
--- a/src/native/external/rapidjson/internal/diyfp.h
+++ b/src/native/external/rapidjson/internal/diyfp.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
@@ -20,12 +20,16 @@
#define RAPIDJSON_DIYFP_H_
#include "../rapidjson.h"
+#include "clzll.h"
#include <limits>
#if defined(_MSC_VER) && defined(_M_AMD64) && !defined(__INTEL_COMPILER)
#include <intrin.h>
-#pragma intrinsic(_BitScanReverse64)
+#if !defined(_ARM64EC_)
#pragma intrinsic(_umul128)
+#else
+#pragma comment(lib,"softintrin")
+#endif
#endif
RAPIDJSON_NAMESPACE_BEGIN
@@ -75,7 +79,7 @@ struct DiyFp {
if (l & (uint64_t(1) << 63)) // rounding
h++;
return DiyFp(h, e + rhs.e + 64);
-#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__)
+#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__)
__extension__ typedef unsigned __int128 uint128;
uint128 p = static_cast<uint128>(f) * static_cast<uint128>(rhs.f);
uint64_t h = static_cast<uint64_t>(p >> 64);
@@ -100,22 +104,8 @@ struct DiyFp {
}
DiyFp Normalize() const {
- RAPIDJSON_ASSERT(f != 0); // https://stackoverflow.com/a/26809183/291737
-#if defined(_MSC_VER) && defined(_M_AMD64)
- unsigned long index;
- _BitScanReverse64(&index, f);
- return DiyFp(f << (63 - index), e - (63 - index));
-#elif defined(__GNUC__) && __GNUC__ >= 4
- int s = __builtin_clzll(f);
+ int s = static_cast<int>(clzll(f));
return DiyFp(f << s, e - s);
-#else
- DiyFp res = *this;
- while (!(res.f & (static_cast<uint64_t>(1) << 63))) {
- res.f <<= 1;
- res.e--;
- }
- return res;
-#endif
}
DiyFp NormalizeBoundary() const {
diff --git a/src/native/external/rapidjson/internal/dtoa.h b/src/native/external/rapidjson/internal/dtoa.h
index bf2e9b2e59a4..cd456721a71c 100644
--- a/src/native/external/rapidjson/internal/dtoa.h
+++ b/src/native/external/rapidjson/internal/dtoa.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
@@ -58,7 +58,11 @@ inline int CountDecimalDigit32(uint32_t n) {
}
inline void DigitGen(const DiyFp& W, const DiyFp& Mp, uint64_t delta, char* buffer, int* len, int* K) {
- static const uint32_t kPow10[] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 };
+ static const uint64_t kPow10[] = { 1ULL, 10ULL, 100ULL, 1000ULL, 10000ULL, 100000ULL, 1000000ULL, 10000000ULL, 100000000ULL,
+ 1000000000ULL, 10000000000ULL, 100000000000ULL, 1000000000000ULL,
+ 10000000000000ULL, 100000000000000ULL, 1000000000000000ULL,
+ 10000000000000000ULL, 100000000000000000ULL, 1000000000000000000ULL,
+ 10000000000000000000ULL };
const DiyFp one(uint64_t(1) << -Mp.e, Mp.e);
const DiyFp wp_w = Mp - W;
uint32_t p1 = static_cast<uint32_t>(Mp.f >> -one.e);
@@ -86,7 +90,7 @@ inline void DigitGen(const DiyFp& W, const DiyFp& Mp, uint64_t delta, char* buff
uint64_t tmp = (static_cast<uint64_t>(p1) << -one.e) + p2;
if (tmp <= delta) {
*K += kappa;
- GrisuRound(buffer, *len, delta, tmp, static_cast<uint64_t>(kPow10[kappa]) << -one.e, wp_w.f);
+ GrisuRound(buffer, *len, delta, tmp, kPow10[kappa] << -one.e, wp_w.f);
return;
}
}
@@ -103,7 +107,7 @@ inline void DigitGen(const DiyFp& W, const DiyFp& Mp, uint64_t delta, char* buff
if (p2 < delta) {
*K += kappa;
int index = -kappa;
- GrisuRound(buffer, *len, delta, p2, one.f, wp_w.f * (index < 9 ? kPow10[index] : 0));
+ GrisuRound(buffer, *len, delta, p2, one.f, wp_w.f * (index < 20 ? kPow10[index] : 0));
return;
}
}
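
The table widening above follows from uint64_t's range: 10^19 (about 1.08e19) still fits below 2^64 - 1 (about 1.84e19) but 10^20 does not, so the exact powers of ten run from 10^0 through 10^19, i.e. 20 entries, and the GrisuRound index guard moves from 9 to 20 to match. A few lines to verify the count:

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint64_t p = 1;
        int n = 0;
        // Count how many powers of ten fit before the next multiply overflows.
        while (p <= UINT64_MAX / 10) { p *= 10; ++n; }
        std::printf("largest exact power: 10^%d, so %d table entries\n", n, n + 1);
        return 0;  // prints 10^19, 20 entries
    }
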
diff --git a/src/native/external/rapidjson/internal/ieee754.h b/src/native/external/rapidjson/internal/ieee754.h
index c2684ba2a35f..68c9e96649b8 100644
--- a/src/native/external/rapidjson/internal/ieee754.h
+++ b/src/native/external/rapidjson/internal/ieee754.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
diff --git a/src/native/external/rapidjson/internal/itoa.h b/src/native/external/rapidjson/internal/itoa.h
index 9b1c45cc1b4a..9fe8c932ffa6 100644
--- a/src/native/external/rapidjson/internal/itoa.h
+++ b/src/native/external/rapidjson/internal/itoa.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
diff --git a/src/native/external/rapidjson/internal/meta.h b/src/native/external/rapidjson/internal/meta.h
index d401edf85150..27092dc0d69c 100644
--- a/src/native/external/rapidjson/internal/meta.h
+++ b/src/native/external/rapidjson/internal/meta.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
diff --git a/src/native/external/rapidjson/internal/pow10.h b/src/native/external/rapidjson/internal/pow10.h
index 02f475d705fc..eae1a43ed1a0 100644
--- a/src/native/external/rapidjson/internal/pow10.h
+++ b/src/native/external/rapidjson/internal/pow10.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
diff --git a/src/native/external/rapidjson/internal/regex.h b/src/native/external/rapidjson/internal/regex.h
deleted file mode 100644
index 16e355921f88..000000000000
--- a/src/native/external/rapidjson/internal/regex.h
+++ /dev/null
@@ -1,740 +0,0 @@
-// Tencent is pleased to support the open source community by making RapidJSON available.
-//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
-//
-// Licensed under the MIT License (the "License"); you may not use this file except
-// in compliance with the License. You may obtain a copy of the License at
-//
-// http://opensource.org/licenses/MIT
-//
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-#ifndef RAPIDJSON_INTERNAL_REGEX_H_
-#define RAPIDJSON_INTERNAL_REGEX_H_
-
-#include "../allocators.h"
-#include "../stream.h"
-#include "stack.h"
-
-#ifdef __clang__
-RAPIDJSON_DIAG_PUSH
-RAPIDJSON_DIAG_OFF(padded)
-RAPIDJSON_DIAG_OFF(switch-enum)
-RAPIDJSON_DIAG_OFF(implicit-fallthrough)
-#elif defined(_MSC_VER)
-RAPIDJSON_DIAG_PUSH
-RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
-#endif
-
-#ifdef __GNUC__
-RAPIDJSON_DIAG_PUSH
-RAPIDJSON_DIAG_OFF(effc++)
-#if __GNUC__ >= 7
-RAPIDJSON_DIAG_OFF(implicit-fallthrough)
-#endif
-#endif
-
-#ifndef RAPIDJSON_REGEX_VERBOSE
-#define RAPIDJSON_REGEX_VERBOSE 0
-#endif
-
-RAPIDJSON_NAMESPACE_BEGIN
-namespace internal {
-
-///////////////////////////////////////////////////////////////////////////////
-// DecodedStream
-
-template <typename SourceStream, typename Encoding>
-class DecodedStream {
-public:
- DecodedStream(SourceStream& ss) : ss_(ss), codepoint_() { Decode(); }
- unsigned Peek() { return codepoint_; }
- unsigned Take() {
- unsigned c = codepoint_;
- if (c) // No further decoding when '\0'
- Decode();
- return c;
- }
-
-private:
- void Decode() {
- if (!Encoding::Decode(ss_, &codepoint_))
- codepoint_ = 0;
- }
-
- SourceStream& ss_;
- unsigned codepoint_;
-};
-
-///////////////////////////////////////////////////////////////////////////////
-// GenericRegex
-
-static const SizeType kRegexInvalidState = ~SizeType(0); //!< Represents an invalid index in GenericRegex::State::out, out1
-static const SizeType kRegexInvalidRange = ~SizeType(0);
-
-template <typename Encoding, typename Allocator>
-class GenericRegexSearch;
-
-//! Regular expression engine with subset of ECMAscript grammar.
-/*!
- Supported regular expression syntax:
- - \c ab Concatenation
- - \c a|b Alternation
- - \c a? Zero or one
- - \c a* Zero or more
- - \c a+ One or more
- - \c a{3} Exactly 3 times
- - \c a{3,} At least 3 times
- - \c a{3,5} 3 to 5 times
- - \c (ab) Grouping
- - \c ^a At the beginning
- - \c a$ At the end
- - \c . Any character
- - \c [abc] Character classes
- - \c [a-c] Character class range
- - \c [a-z0-9_] Character class combination
- - \c [^abc] Negated character classes
- - \c [^a-c] Negated character class range
- - \c [\b] Backspace (U+0008)
- - \c \\| \\\\ ... Escape characters
- - \c \\f Form feed (U+000C)
- - \c \\n Line feed (U+000A)
- - \c \\r Carriage return (U+000D)
- - \c \\t Tab (U+0009)
- - \c \\v Vertical tab (U+000B)
-
- \note This is a Thompson NFA engine, implemented with reference to
- Cox, Russ. "Regular Expression Matching Can Be Simple And Fast (but is slow in Java, Perl, PHP, Python, Ruby,...).",
- https://swtch.com/~rsc/regexp/regexp1.html
-*/
-template <typename Encoding, typename Allocator = CrtAllocator>
-class GenericRegex {
-public:
- typedef Encoding EncodingType;
- typedef typename Encoding::Ch Ch;
- template <typename, typename> friend class GenericRegexSearch;
-
- GenericRegex(const Ch* source, Allocator* allocator = 0) :
- ownAllocator_(allocator ? 0 : RAPIDJSON_NEW(Allocator)()), allocator_(allocator ? allocator : ownAllocator_),
- states_(allocator_, 256), ranges_(allocator_, 256), root_(kRegexInvalidState), stateCount_(), rangeCount_(),
- anchorBegin_(), anchorEnd_()
- {
- GenericStringStream<Encoding> ss(source);
- DecodedStream<GenericStringStream<Encoding>, Encoding> ds(ss);
- Parse(ds);
- }
-
- ~GenericRegex()
- {
- RAPIDJSON_DELETE(ownAllocator_);
- }
-
- bool IsValid() const {
- return root_ != kRegexInvalidState;
- }
-
-private:
- enum Operator {
- kZeroOrOne,
- kZeroOrMore,
- kOneOrMore,
- kConcatenation,
- kAlternation,
- kLeftParenthesis
- };
-
- static const unsigned kAnyCharacterClass = 0xFFFFFFFF; //!< For '.'
- static const unsigned kRangeCharacterClass = 0xFFFFFFFE;
- static const unsigned kRangeNegationFlag = 0x80000000;
-
- struct Range {
- unsigned start; //
- unsigned end;
- SizeType next;
- };
-
- struct State {
- SizeType out; //!< Equals to kInvalid for matching state
- SizeType out1; //!< Equals to non-kInvalid for split
- SizeType rangeStart;
- unsigned codepoint;
- };
-
- struct Frag {
- Frag(SizeType s, SizeType o, SizeType m) : start(s), out(o), minIndex(m) {}
- SizeType start;
- SizeType out; //!< link-list of all output states
- SizeType minIndex;
- };
-
- State& GetState(SizeType index) {
- RAPIDJSON_ASSERT(index < stateCount_);
- return states_.template Bottom<State>()[index];
- }
-
- const State& GetState(SizeType index) const {
- RAPIDJSON_ASSERT(index < stateCount_);
- return states_.template Bottom<State>()[index];
- }
-
- Range& GetRange(SizeType index) {
- RAPIDJSON_ASSERT(index < rangeCount_);
- return ranges_.template Bottom<Range>()[index];
- }
-
- const Range& GetRange(SizeType index) const {
- RAPIDJSON_ASSERT(index < rangeCount_);
- return ranges_.template Bottom<Range>()[index];
- }
-
- template <typename InputStream>
- void Parse(DecodedStream<InputStream, Encoding>& ds) {
- Stack<Allocator> operandStack(allocator_, 256); // Frag
- Stack<Allocator> operatorStack(allocator_, 256); // Operator
- Stack<Allocator> atomCountStack(allocator_, 256); // unsigned (Atom per parenthesis)
-
- *atomCountStack.template Push<unsigned>() = 0;
-
- unsigned codepoint;
- while (ds.Peek() != 0) {
- switch (codepoint = ds.Take()) {
- case '^':
- anchorBegin_ = true;
- break;
-
- case '$':
- anchorEnd_ = true;
- break;
-
- case '|':
- while (!operatorStack.Empty() && *operatorStack.template Top<Operator>() < kAlternation)
- if (!Eval(operandStack, *operatorStack.template Pop<Operator>(1)))
- return;
- *operatorStack.template Push<Operator>() = kAlternation;
- *atomCountStack.template Top<unsigned>() = 0;
- break;
-
- case '(':
- *operatorStack.template Push<Operator>() = kLeftParenthesis;
- *atomCountStack.template Push<unsigned>() = 0;
- break;
-
- case ')':
- while (!operatorStack.Empty() && *operatorStack.template Top<Operator>() != kLeftParenthesis)
- if (!Eval(operandStack, *operatorStack.template Pop<Operator>(1)))
- return;
- if (operatorStack.Empty())
- return;
- operatorStack.template Pop<Operator>(1);
- atomCountStack.template Pop<unsigned>(1);
- ImplicitConcatenation(atomCountStack, operatorStack);
- break;
-
- case '?':
- if (!Eval(operandStack, kZeroOrOne))
- return;
- break;
-
- case '*':
- if (!Eval(operandStack, kZeroOrMore))
- return;
- break;
-
- case '+':
- if (!Eval(operandStack, kOneOrMore))
- return;
- break;
-
- case '{':
- {
- unsigned n, m;
- if (!ParseUnsigned(ds, &n))
- return;
-
- if (ds.Peek() == ',') {
- ds.Take();
- if (ds.Peek() == '}')
- m = kInfinityQuantifier;
- else if (!ParseUnsigned(ds, &m) || m < n)
- return;
- }
- else
- m = n;
-
- if (!EvalQuantifier(operandStack, n, m) || ds.Peek() != '}')
- return;
- ds.Take();
- }
- break;
-
- case '.':
- PushOperand(operandStack, kAnyCharacterClass);
- ImplicitConcatenation(atomCountStack, operatorStack);
- break;
-
- case '[':
- {
- SizeType range;
- if (!ParseRange(ds, &range))
- return;
- SizeType s = NewState(kRegexInvalidState, kRegexInvalidState, kRangeCharacterClass);
- GetState(s).rangeStart = range;
- *operandStack.template Push<Frag>() = Frag(s, s, s);
- }
- ImplicitConcatenation(atomCountStack, operatorStack);
- break;
-
- case '\\': // Escape character
- if (!CharacterEscape(ds, &codepoint))
- return; // Unsupported escape character
- // fall through to default
-
- default: // Pattern character
- PushOperand(operandStack, codepoint);
- ImplicitConcatenation(atomCountStack, operatorStack);
- }
- }
-
- while (!operatorStack.Empty())
- if (!Eval(operandStack, *operatorStack.template Pop<Operator>(1)))
- return;
-
- // Link the operand to matching state.
- if (operandStack.GetSize() == sizeof(Frag)) {
- Frag* e = operandStack.template Pop<Frag>(1);
- Patch(e->out, NewState(kRegexInvalidState, kRegexInvalidState, 0));
- root_ = e->start;
-
-#if RAPIDJSON_REGEX_VERBOSE
- printf("root: %d\n", root_);
- for (SizeType i = 0; i < stateCount_ ; i++) {
- State& s = GetState(i);
- printf("[%2d] out: %2d out1: %2d c: '%c'\n", i, s.out, s.out1, (char)s.codepoint);
- }
- printf("\n");
-#endif
- }
- }
-
- SizeType NewState(SizeType out, SizeType out1, unsigned codepoint) {
- State* s = states_.template Push<State>();
- s->out = out;
- s->out1 = out1;
- s->codepoint = codepoint;
- s->rangeStart = kRegexInvalidRange;
- return stateCount_++;
- }
-
- void PushOperand(Stack<Allocator>& operandStack, unsigned codepoint) {
- SizeType s = NewState(kRegexInvalidState, kRegexInvalidState, codepoint);
- *operandStack.template Push<Frag>() = Frag(s, s, s);
- }
-
- void ImplicitConcatenation(Stack<Allocator>& atomCountStack, Stack<Allocator>& operatorStack) {
- if (*atomCountStack.template Top<unsigned>())
- *operatorStack.template Push<Operator>() = kConcatenation;
- (*atomCountStack.template Top<unsigned>())++;
- }
-
- SizeType Append(SizeType l1, SizeType l2) {
- SizeType old = l1;
- while (GetState(l1).out != kRegexInvalidState)
- l1 = GetState(l1).out;
- GetState(l1).out = l2;
- return old;
- }
-
- void Patch(SizeType l, SizeType s) {
- for (SizeType next; l != kRegexInvalidState; l = next) {
- next = GetState(l).out;
- GetState(l).out = s;
- }
- }
-
- bool Eval(Stack<Allocator>& operandStack, Operator op) {
- switch (op) {
- case kConcatenation:
- RAPIDJSON_ASSERT(operandStack.GetSize() >= sizeof(Frag) * 2);
- {
- Frag e2 = *operandStack.template Pop<Frag>(1);
- Frag e1 = *operandStack.template Pop<Frag>(1);
- Patch(e1.out, e2.start);
- *operandStack.template Push<Frag>() = Frag(e1.start, e2.out, Min(e1.minIndex, e2.minIndex));
- }
- return true;
-
- case kAlternation:
- if (operandStack.GetSize() >= sizeof(Frag) * 2) {
- Frag e2 = *operandStack.template Pop<Frag>(1);
- Frag e1 = *operandStack.template Pop<Frag>(1);
- SizeType s = NewState(e1.start, e2.start, 0);
- *operandStack.template Push<Frag>() = Frag(s, Append(e1.out, e2.out), Min(e1.minIndex, e2.minIndex));
- return true;
- }
- return false;
-
- case kZeroOrOne:
- if (operandStack.GetSize() >= sizeof(Frag)) {
- Frag e = *operandStack.template Pop<Frag>(1);
- SizeType s = NewState(kRegexInvalidState, e.start, 0);
- *operandStack.template Push<Frag>() = Frag(s, Append(e.out, s), e.minIndex);
- return true;
- }
- return false;
-
- case kZeroOrMore:
- if (operandStack.GetSize() >= sizeof(Frag)) {
- Frag e = *operandStack.template Pop<Frag>(1);
- SizeType s = NewState(kRegexInvalidState, e.start, 0);
- Patch(e.out, s);
- *operandStack.template Push<Frag>() = Frag(s, s, e.minIndex);
- return true;
- }
- return false;
-
- case kOneOrMore:
- if (operandStack.GetSize() >= sizeof(Frag)) {
- Frag e = *operandStack.template Pop<Frag>(1);
- SizeType s = NewState(kRegexInvalidState, e.start, 0);
- Patch(e.out, s);
- *operandStack.template Push<Frag>() = Frag(e.start, s, e.minIndex);
- return true;
- }
- return false;
-
- default:
- // syntax error (e.g. unclosed kLeftParenthesis)
- return false;
- }
- }
-
- bool EvalQuantifier(Stack<Allocator>& operandStack, unsigned n, unsigned m) {
- RAPIDJSON_ASSERT(n <= m);
- RAPIDJSON_ASSERT(operandStack.GetSize() >= sizeof(Frag));
-
- if (n == 0) {
- if (m == 0) // a{0} not support
- return false;
- else if (m == kInfinityQuantifier)
- Eval(operandStack, kZeroOrMore); // a{0,} -> a*
- else {
- Eval(operandStack, kZeroOrOne); // a{0,5} -> a?
- for (unsigned i = 0; i < m - 1; i++)
- CloneTopOperand(operandStack); // a{0,5} -> a? a? a? a? a?
- for (unsigned i = 0; i < m - 1; i++)
- Eval(operandStack, kConcatenation); // a{0,5} -> a?a?a?a?a?
- }
- return true;
- }
-
- for (unsigned i = 0; i < n - 1; i++) // a{3} -> a a a
- CloneTopOperand(operandStack);
-
- if (m == kInfinityQuantifier)
- Eval(operandStack, kOneOrMore); // a{3,} -> a a a+
- else if (m > n) {
- CloneTopOperand(operandStack); // a{3,5} -> a a a a
- Eval(operandStack, kZeroOrOne); // a{3,5} -> a a a a?
- for (unsigned i = n; i < m - 1; i++)
- CloneTopOperand(operandStack); // a{3,5} -> a a a a? a?
- for (unsigned i = n; i < m; i++)
- Eval(operandStack, kConcatenation); // a{3,5} -> a a aa?a?
- }
-
- for (unsigned i = 0; i < n - 1; i++)
- Eval(operandStack, kConcatenation); // a{3} -> aaa, a{3,} -> aaa+, a{3,5} -> aaaa?a?
-
- return true;
- }
-
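
The quantifier comments above encode the whole strategy: bounded repetition is compiled away by cloning the operand and reusing the ?, * and + operators already supported by the NFA, so matching never needs counters. A minimal standalone sketch of the same rewriting on pattern strings, assuming single-character atoms (ExpandQuantifier and kInf are illustrative names, not RapidJSON API):

    #include <iostream>
    #include <string>

    // Illustrative only: mirrors EvalQuantifier's rewriting of a{n,m}.
    static const unsigned kInf = ~0u; // stands in for an open upper bound, a{n,}

    std::string ExpandQuantifier(const std::string& atom, unsigned n, unsigned m) {
        std::string out;
        if (n == 0) {
            if (m == 0) return out;                             // a{0}: rejected by the parser
            if (m == kInf) return atom + "*";                   // a{0,}  -> a*
            for (unsigned i = 0; i < m; i++) out += atom + "?"; // a{0,5} -> a?a?a?a?a?
            return out;
        }
        for (unsigned i = 0; i < n; i++) out += atom;           // a{3}   -> aaa
        if (m == kInf) out += "+";                              // a{3,}  -> aaa+
        else for (unsigned i = n; i < m; i++) out += atom + "?"; // a{3,5} -> aaaa?a?
        return out;
    }

    int main() {
        std::cout << ExpandQuantifier("a", 3, 5) << "\n"; // prints aaaa?a?
    }
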
- static SizeType Min(SizeType a, SizeType b) { return a < b ? a : b; }
-
- void CloneTopOperand(Stack<Allocator>& operandStack) {
- const Frag src = *operandStack.template Top<Frag>(); // Copy constructor to prevent invalidation
- SizeType count = stateCount_ - src.minIndex; // Assumes top operand contains states in [src.minIndex, stateCount_)
- State* s = states_.template Push<State>(count);
- memcpy(s, &GetState(src.minIndex), count * sizeof(State));
- for (SizeType j = 0; j < count; j++) {
- if (s[j].out != kRegexInvalidState)
- s[j].out += count;
- if (s[j].out1 != kRegexInvalidState)
- s[j].out1 += count;
- }
- *operandStack.template Push<Frag>() = Frag(src.start + count, src.out + count, src.minIndex + count);
- stateCount_ += count;
- }
-
- template <typename InputStream>
- bool ParseUnsigned(DecodedStream<InputStream, Encoding>& ds, unsigned* u) {
- unsigned r = 0;
- if (ds.Peek() < '0' || ds.Peek() > '9')
- return false;
- while (ds.Peek() >= '0' && ds.Peek() <= '9') {
- if (r > 429496729 || (r == 429496729 && ds.Peek() > '5')) // 2^32 - 1 = 4294967295
- return false; // overflow
- r = r * 10 + (ds.Take() - '0');
- }
- *u = r;
- return true;
- }
-
- template <typename InputStream>
- bool ParseRange(DecodedStream<InputStream, Encoding>& ds, SizeType* range) {
- bool isBegin = true;
- bool negate = false;
- int step = 0;
- SizeType start = kRegexInvalidRange;
- SizeType current = kRegexInvalidRange;
- unsigned codepoint;
- while ((codepoint = ds.Take()) != 0) {
- if (isBegin) {
- isBegin = false;
- if (codepoint == '^') {
- negate = true;
- continue;
- }
- }
-
- switch (codepoint) {
- case ']':
- if (start == kRegexInvalidRange)
- return false; // Error: nothing inside []
- if (step == 2) { // Add trailing '-'
- SizeType r = NewRange('-');
- RAPIDJSON_ASSERT(current != kRegexInvalidRange);
- GetRange(current).next = r;
- }
- if (negate)
- GetRange(start).start |= kRangeNegationFlag;
- *range = start;
- return true;
-
- case '\\':
- if (ds.Peek() == 'b') {
- ds.Take();
- codepoint = 0x0008; // Escape backspace character
- }
- else if (!CharacterEscape(ds, &codepoint))
- return false;
- // fall through to default
-
- default:
- switch (step) {
- case 1:
- if (codepoint == '-') {
- step++;
- break;
- }
- // fall through to step 0 for other characters
-
- case 0:
- {
- SizeType r = NewRange(codepoint);
- if (current != kRegexInvalidRange)
- GetRange(current).next = r;
- if (start == kRegexInvalidRange)
- start = r;
- current = r;
- }
- step = 1;
- break;
-
- default:
- RAPIDJSON_ASSERT(step == 2);
- GetRange(current).end = codepoint;
- step = 0;
- }
- }
- }
- return false;
- }
-
- SizeType NewRange(unsigned codepoint) {
- Range* r = ranges_.template Push<Range>();
- r->start = r->end = codepoint;
- r->next = kRegexInvalidRange;
- return rangeCount_++;
- }
-
- template <typename InputStream>
- bool CharacterEscape(DecodedStream<InputStream, Encoding>& ds, unsigned* escapedCodepoint) {
- unsigned codepoint;
- switch (codepoint = ds.Take()) {
- case '^':
- case '$':
- case '|':
- case '(':
- case ')':
- case '?':
- case '*':
- case '+':
- case '.':
- case '[':
- case ']':
- case '{':
- case '}':
- case '\\':
- *escapedCodepoint = codepoint; return true;
- case 'f': *escapedCodepoint = 0x000C; return true;
- case 'n': *escapedCodepoint = 0x000A; return true;
- case 'r': *escapedCodepoint = 0x000D; return true;
- case 't': *escapedCodepoint = 0x0009; return true;
- case 'v': *escapedCodepoint = 0x000B; return true;
- default:
- return false; // Unsupported escape character
- }
- }
-
- Allocator* ownAllocator_;
- Allocator* allocator_;
- Stack<Allocator> states_;
- Stack<Allocator> ranges_;
- SizeType root_;
- SizeType stateCount_;
- SizeType rangeCount_;
-
- static const unsigned kInfinityQuantifier = ~0u;
-
- // For SearchWithAnchoring()
- bool anchorBegin_;
- bool anchorEnd_;
-};
-
-template <typename RegexType, typename Allocator = CrtAllocator>
-class GenericRegexSearch {
-public:
- typedef typename RegexType::EncodingType Encoding;
- typedef typename Encoding::Ch Ch;
-
- GenericRegexSearch(const RegexType& regex, Allocator* allocator = 0) :
- regex_(regex), allocator_(allocator), ownAllocator_(0),
- state0_(allocator, 0), state1_(allocator, 0), stateSet_()
- {
- RAPIDJSON_ASSERT(regex_.IsValid());
- if (!allocator_)
- ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)();
- stateSet_ = static_cast<unsigned*>(allocator_->Malloc(GetStateSetSize()));
- state0_.template Reserve<SizeType>(regex_.stateCount_);
- state1_.template Reserve<SizeType>(regex_.stateCount_);
- }
-
- ~GenericRegexSearch() {
- Allocator::Free(stateSet_);
- RAPIDJSON_DELETE(ownAllocator_);
- }
-
- template <typename InputStream>
- bool Match(InputStream& is) {
- return SearchWithAnchoring(is, true, true);
- }
-
- bool Match(const Ch* s) {
- GenericStringStream<Encoding> is(s);
- return Match(is);
- }
-
- template <typename InputStream>
- bool Search(InputStream& is) {
- return SearchWithAnchoring(is, regex_.anchorBegin_, regex_.anchorEnd_);
- }
-
- bool Search(const Ch* s) {
- GenericStringStream<Encoding> is(s);
- return Search(is);
- }
-
-private:
- typedef typename RegexType::State State;
- typedef typename RegexType::Range Range;
-
- template <typename InputStream>
- bool SearchWithAnchoring(InputStream& is, bool anchorBegin, bool anchorEnd) {
- DecodedStream<InputStream, Encoding> ds(is);
-
- state0_.Clear();
- Stack<Allocator> *current = &state0_, *next = &state1_;
- const size_t stateSetSize = GetStateSetSize();
- std::memset(stateSet_, 0, stateSetSize);
-
- bool matched = AddState(*current, regex_.root_);
- unsigned codepoint;
- while (!current->Empty() && (codepoint = ds.Take()) != 0) {
- std::memset(stateSet_, 0, stateSetSize);
- next->Clear();
- matched = false;
- for (const SizeType* s = current->template Bottom<SizeType>(); s != current->template End<SizeType>(); ++s) {
- const State& sr = regex_.GetState(*s);
- if (sr.codepoint == codepoint ||
- sr.codepoint == RegexType::kAnyCharacterClass ||
- (sr.codepoint == RegexType::kRangeCharacterClass && MatchRange(sr.rangeStart, codepoint)))
- {
- matched = AddState(*next, sr.out) || matched;
- if (!anchorEnd && matched)
- return true;
- }
- if (!anchorBegin)
- AddState(*next, regex_.root_);
- }
- internal::Swap(current, next);
- }
-
- return matched;
- }
-
- size_t GetStateSetSize() const {
- return (regex_.stateCount_ + 31) / 32 * 4;
- }
-
- // Return whether the added state is a match state
- bool AddState(Stack<Allocator>& l, SizeType index) {
- RAPIDJSON_ASSERT(index != kRegexInvalidState);
-
- const State& s = regex_.GetState(index);
- if (s.out1 != kRegexInvalidState) { // Split
- bool matched = AddState(l, s.out);
- return AddState(l, s.out1) || matched;
- }
- else if (!(stateSet_[index >> 5] & (1u << (index & 31)))) {
- stateSet_[index >> 5] |= (1u << (index & 31));
- *l.template PushUnsafe<SizeType>() = index;
- }
- return s.out == kRegexInvalidState; // by using PushUnsafe() above, we ensure s is not invalidated by reallocation.
- }
-
- bool MatchRange(SizeType rangeIndex, unsigned codepoint) const {
- bool yes = (regex_.GetRange(rangeIndex).start & RegexType::kRangeNegationFlag) == 0;
- while (rangeIndex != kRegexInvalidRange) {
- const Range& r = regex_.GetRange(rangeIndex);
- if (codepoint >= (r.start & ~RegexType::kRangeNegationFlag) && codepoint <= r.end)
- return yes;
- rangeIndex = r.next;
- }
- return !yes;
- }
-
- const RegexType& regex_;
- Allocator* allocator_;
- Allocator* ownAllocator_;
- Stack<Allocator> state0_;
- Stack<Allocator> state1_;
- uint32_t* stateSet_;
-};
-
-typedef GenericRegex<UTF8<> > Regex;
-typedef GenericRegexSearch<Regex> RegexSearch;
-
-} // namespace internal
-RAPIDJSON_NAMESPACE_END
-
-#ifdef __GNUC__
-RAPIDJSON_DIAG_POP
-#endif
-
-#if defined(__clang__) || defined(_MSC_VER)
-RAPIDJSON_DIAG_POP
-#endif
-
-#endif // RAPIDJSON_INTERNAL_REGEX_H_
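
Taken together: GenericRegex compiles the pattern into a Thompson NFA once, and GenericRegexSearch runs the state-set simulation per input, so a compiled regex and its searcher can be reused across calls. A minimal usage sketch, assuming the vendored header path below; note these classes sit in rapidjson::internal and are not public API:

    #include "rapidjson/internal/regex.h" // vendored path may differ
    #include <cassert>

    using rapidjson::internal::Regex;
    using rapidjson::internal::RegexSearch;

    int main() {
        Regex re("a(b|c)[0-9]{2,3}$"); // compiled to an NFA once, up front
        assert(re.IsValid());

        RegexSearch search(re);        // allocates its state sets once, reusable
        assert(search.Search("ab12")); // Search() honors the pattern's ^/$ anchors
        assert(!search.Match("ab1"));  // Match() anchors both ends; too few digits here
        return 0;
    }
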
diff --git a/src/native/external/rapidjson/internal/stack.h b/src/native/external/rapidjson/internal/stack.h
index 45dca6a8b09e..73abd706e976 100644
--- a/src/native/external/rapidjson/internal/stack.h
+++ b/src/native/external/rapidjson/internal/stack.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
diff --git a/src/native/external/rapidjson/internal/strfunc.h b/src/native/external/rapidjson/internal/strfunc.h
index 226439a76736..b698a8f43fa6 100644
--- a/src/native/external/rapidjson/internal/strfunc.h
+++ b/src/native/external/rapidjson/internal/strfunc.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
@@ -45,6 +45,20 @@ inline SizeType StrLen(const wchar_t* s) {
return SizeType(std::wcslen(s));
}
+//! Custom strcmp() which works on different character types.
+/*! \tparam Ch Character type (e.g. char, wchar_t, short)
+ \param s1 Null-terminated input string.
+ \param s2 Null-terminated input string.
+ \return 0 if equal, negative if s1 < s2, positive if s1 > s2.
+*/
+template<typename Ch>
+inline int StrCmp(const Ch* s1, const Ch* s2) {
+ RAPIDJSON_ASSERT(s1 != 0);
+ RAPIDJSON_ASSERT(s2 != 0);
+ while(*s1 && (*s1 == *s2)) { s1++; s2++; }
+ return static_cast<unsigned>(*s1) < static_cast<unsigned>(*s2) ? -1 : static_cast<unsigned>(*s1) > static_cast<unsigned>(*s2);
+}
+
//! Returns number of code points in an encoded string.
template<typename Encoding>
bool CountStringCodePoint(const typename Encoding::Ch* s, SizeType length, SizeType* outCount) {
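
The newly added StrCmp behaves like std::strcmp but is templated on the character type, so the one helper serves char, wchar_t and UTF-16/UTF-32 code-unit buffers alike. A small sketch of the intended use, assuming the header above is on the include path:

    #include "rapidjson/internal/strfunc.h" // vendored path may differ
    #include <cassert>

    int main() {
        using rapidjson::internal::StrCmp;
        assert(StrCmp("abc", "abc") == 0); // equal -> 0
        assert(StrCmp("abc", "abd") < 0);  // sign comes from the first differing unit
        assert(StrCmp(L"b", L"a") > 0);    // the same template works for wchar_t
        return 0;
    }
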
diff --git a/src/native/external/rapidjson/internal/strtod.h b/src/native/external/rapidjson/internal/strtod.h
index dfca22b65ac0..55f0e380bfaa 100644
--- a/src/native/external/rapidjson/internal/strtod.h
+++ b/src/native/external/rapidjson/internal/strtod.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
@@ -128,17 +128,18 @@ inline bool StrtodFast(double d, int p, double* result) {
}
// Compute an approximation and see if it is within 1/2 ULP
-inline bool StrtodDiyFp(const char* decimals, int dLen, int dExp, double* result) {
+template<typename Ch>
+inline bool StrtodDiyFp(const Ch* decimals, int dLen, int dExp, double* result) {
uint64_t significand = 0;
int i = 0; // 2^64 - 1 = 18446744073709551615, 1844674407370955161 = 0x1999999999999999
for (; i < dLen; i++) {
if (significand > RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) ||
- (significand == RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) && decimals[i] > '5'))
+ (significand == RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) && decimals[i] > Ch('5')))
break;
- significand = significand * 10u + static_cast<unsigned>(decimals[i] - '0');
+ significand = significand * 10u + static_cast<unsigned>(decimals[i] - Ch('0'));
}
- if (i < dLen && decimals[i] >= '5') // Rounding
+ if (i < dLen && decimals[i] >= Ch('5')) // Rounding
significand++;
int remaining = dLen - i;
@@ -205,7 +206,8 @@ inline bool StrtodDiyFp(const char* decimals, int dLen, int dExp, double* result
return halfWay - static_cast<unsigned>(error) >= precisionBits || precisionBits >= halfWay + static_cast<unsigned>(error);
}
-inline double StrtodBigInteger(double approx, const char* decimals, int dLen, int dExp) {
+template<typename Ch>
+inline double StrtodBigInteger(double approx, const Ch* decimals, int dLen, int dExp) {
RAPIDJSON_ASSERT(dLen >= 0);
const BigInteger dInt(decimals, static_cast<unsigned>(dLen));
Double a(approx);
@@ -223,7 +225,8 @@ inline double StrtodBigInteger(double approx, const char* decimals, int dLen, in
return a.NextPositiveDouble();
}
-inline double StrtodFullPrecision(double d, int p, const char* decimals, size_t length, size_t decimalPosition, int exp) {
+template<typename Ch>
+inline double StrtodFullPrecision(double d, int p, const Ch* decimals, size_t length, size_t decimalPosition, int exp) {
RAPIDJSON_ASSERT(d >= 0.0);
RAPIDJSON_ASSERT(length >= 1);
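
The strtod changes all follow one motif: character literals such as '0' and '5' become Ch('0') and Ch('5') so the digit buffer may be any character type, not just char. A reduced illustration of that motif (DigitsToUint64 is an illustrative name, not a RapidJSON function):

    #include <cstdint>
    #include <iostream>

    // Illustrative only: accumulate decimal digits from a buffer of any
    // character type, comparing code units against Ch('0') as the patch does.
    template <typename Ch>
    std::uint64_t DigitsToUint64(const Ch* decimals, int dLen) {
        std::uint64_t significand = 0;
        for (int i = 0; i < dLen; i++)
            significand = significand * 10u + static_cast<unsigned>(decimals[i] - Ch('0'));
        return significand;
    }

    int main() {
        std::cout << DigitsToUint64("123", 3) << "\n";  // 123 from a char buffer
        std::cout << DigitsToUint64(L"456", 3) << "\n"; // 456 from a wchar_t buffer
    }
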
diff --git a/src/native/external/rapidjson/internal/swap.h b/src/native/external/rapidjson/internal/swap.h
index 666e49f97b68..2cf92f93a1d3 100644
--- a/src/native/external/rapidjson/internal/swap.h
+++ b/src/native/external/rapidjson/internal/swap.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
diff --git a/src/native/external/rapidjson/istreamwrapper.h b/src/native/external/rapidjson/istreamwrapper.h
index c4950b9dcf82..01437ec0127a 100644
--- a/src/native/external/rapidjson/istreamwrapper.h
+++ b/src/native/external/rapidjson/istreamwrapper.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
diff --git a/src/native/external/rapidjson/memorybuffer.h b/src/native/external/rapidjson/memorybuffer.h
deleted file mode 100644
index 39bee1dec1c0..000000000000
--- a/src/native/external/rapidjson/memorybuffer.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Tencent is pleased to support the open source community by making RapidJSON available.
-//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
-//
-// Licensed under the MIT License (the "License"); you may not use this file except
-// in compliance with the License. You may obtain a copy of the License at
-//
-// http://opensource.org/licenses/MIT
-//
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-#ifndef RAPIDJSON_MEMORYBUFFER_H_
-#define RAPIDJSON_MEMORYBUFFER_H_
-
-#include "stream.h"
-#include "internal/stack.h"
-
-RAPIDJSON_NAMESPACE_BEGIN
-
-//! Represents an in-memory output byte stream.
-/*!
- This class is mainly for being wrapped by EncodedOutputStream or AutoUTFOutputStream.
-
- It is similar to FileWriteBuffer but the destination is an in-memory buffer instead of a file.
-
- Differences between MemoryBuffer and StringBuffer:
- 1. StringBuffer has Encoding but MemoryBuffer is only a byte buffer.
- 2. StringBuffer::GetString() returns a null-terminated string. MemoryBuffer::GetBuffer() returns a buffer without terminator.
-
- \tparam Allocator type for allocating memory buffer.
- \note implements Stream concept
-*/
-template <typename Allocator = CrtAllocator>
-struct GenericMemoryBuffer {
- typedef char Ch; // byte
-
- GenericMemoryBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {}
-
- void Put(Ch c) { *stack_.template Push<Ch>() = c; }
- void Flush() {}
-
- void Clear() { stack_.Clear(); }
- void ShrinkToFit() { stack_.ShrinkToFit(); }
- Ch* Push(size_t count) { return stack_.template Push<Ch>(count); }
- void Pop(size_t count) { stack_.template Pop<Ch>(count); }
-
- const Ch* GetBuffer() const {
- return stack_.template Bottom<Ch>();
- }
-
- size_t GetSize() const { return stack_.GetSize(); }
-
- static const size_t kDefaultCapacity = 256;
- mutable internal::Stack<Allocator> stack_;
-};
-
-typedef GenericMemoryBuffer<> MemoryBuffer;
-
-//! Implements a specialized version of PutN() with memset() for better performance.
-template<>
-inline void PutN(MemoryBuffer& memoryBuffer, char c, size_t n) {
- std::memset(memoryBuffer.stack_.Push<char>(n), c, n * sizeof(c));
-}
-
-RAPIDJSON_NAMESPACE_END
-
-#endif // RAPIDJSON_MEMORYBUFFER_H_
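
memorybuffer.h is dropped from this vendored copy entirely. For orientation, upstream usage of the API shown above looks roughly like this (a sketch only, assuming the upstream header is available):

    #include "rapidjson/memorybuffer.h" // upstream header; removed from this vendored copy
    #include <cstdio>

    int main() {
        rapidjson::MemoryBuffer buf;
        buf.Put('h');
        buf.Put('i');
        rapidjson::PutN(buf, '!', 3); // hits the memset()-based specialization above
        std::fwrite(buf.GetBuffer(), 1, buf.GetSize(), stdout); // "hi!!!", no terminator
        return 0;
    }
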
diff --git a/src/native/external/rapidjson/memorystream.h b/src/native/external/rapidjson/memorystream.h
index 1d71d8a4f0e0..77af6c999e97 100644
--- a/src/native/external/rapidjson/memorystream.h
+++ b/src/native/external/rapidjson/memorystream.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
diff --git a/src/native/external/rapidjson/msinttypes/inttypes.h b/src/native/external/rapidjson/msinttypes/inttypes.h
deleted file mode 100644
index 18111286bf55..000000000000
--- a/src/native/external/rapidjson/msinttypes/inttypes.h
+++ /dev/null
@@ -1,316 +0,0 @@
-// ISO C9x compliant inttypes.h for Microsoft Visual Studio
-// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
-//
-// Copyright (c) 2006-2013 Alexander Chemeris
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the product nor the names of its contributors may
-// be used to endorse or promote products derived from this software
-// without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
-// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
-// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
-// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
-// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
-// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-///////////////////////////////////////////////////////////////////////////////
-
-// The above software in this distribution may have been modified by
-// THL A29 Limited ("Tencent Modifications").
-// All Tencent Modifications are Copyright (C) 2015 THL A29 Limited.
-
-#ifndef _MSC_VER // [
-#error "Use this header only with Microsoft Visual C++ compilers!"
-#endif // _MSC_VER ]
-
-#ifndef _MSC_INTTYPES_H_ // [
-#define _MSC_INTTYPES_H_
-
-#if _MSC_VER > 1000
-#pragma once
-#endif
-
-#include "stdint.h"
-
-// miloyip: VC supports inttypes.h since VC2013
-#if _MSC_VER >= 1800
-#include <inttypes.h>
-#else
-
-// 7.8 Format conversion of integer types
-
-typedef struct {
- intmax_t quot;
- intmax_t rem;
-} imaxdiv_t;
-
-// 7.8.1 Macros for format specifiers
-
-#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198
-
-// The fprintf macros for signed integers are:
-#define PRId8 "d"
-#define PRIi8 "i"
-#define PRIdLEAST8 "d"
-#define PRIiLEAST8 "i"
-#define PRIdFAST8 "d"
-#define PRIiFAST8 "i"
-
-#define PRId16 "hd"
-#define PRIi16 "hi"
-#define PRIdLEAST16 "hd"
-#define PRIiLEAST16 "hi"
-#define PRIdFAST16 "hd"
-#define PRIiFAST16 "hi"
-
-#define PRId32 "I32d"
-#define PRIi32 "I32i"
-#define PRIdLEAST32 "I32d"
-#define PRIiLEAST32 "I32i"
-#define PRIdFAST32 "I32d"
-#define PRIiFAST32 "I32i"
-
-#define PRId64 "I64d"
-#define PRIi64 "I64i"
-#define PRIdLEAST64 "I64d"
-#define PRIiLEAST64 "I64i"
-#define PRIdFAST64 "I64d"
-#define PRIiFAST64 "I64i"
-
-#define PRIdMAX "I64d"
-#define PRIiMAX "I64i"
-
-#define PRIdPTR "Id"
-#define PRIiPTR "Ii"
-
-// The fprintf macros for unsigned integers are:
-#define PRIo8 "o"
-#define PRIu8 "u"
-#define PRIx8 "x"
-#define PRIX8 "X"
-#define PRIoLEAST8 "o"
-#define PRIuLEAST8 "u"
-#define PRIxLEAST8 "x"
-#define PRIXLEAST8 "X"
-#define PRIoFAST8 "o"
-#define PRIuFAST8 "u"
-#define PRIxFAST8 "x"
-#define PRIXFAST8 "X"
-
-#define PRIo16 "ho"
-#define PRIu16 "hu"
-#define PRIx16 "hx"
-#define PRIX16 "hX"
-#define PRIoLEAST16 "ho"
-#define PRIuLEAST16 "hu"
-#define PRIxLEAST16 "hx"
-#define PRIXLEAST16 "hX"
-#define PRIoFAST16 "ho"
-#define PRIuFAST16 "hu"
-#define PRIxFAST16 "hx"
-#define PRIXFAST16 "hX"
-
-#define PRIo32 "I32o"
-#define PRIu32 "I32u"
-#define PRIx32 "I32x"
-#define PRIX32 "I32X"
-#define PRIoLEAST32 "I32o"
-#define PRIuLEAST32 "I32u"
-#define PRIxLEAST32 "I32x"
-#define PRIXLEAST32 "I32X"
-#define PRIoFAST32 "I32o"
-#define PRIuFAST32 "I32u"
-#define PRIxFAST32 "I32x"
-#define PRIXFAST32 "I32X"
-
-#define PRIo64 "I64o"
-#define PRIu64 "I64u"
-#define PRIx64 "I64x"
-#define PRIX64 "I64X"
-#define PRIoLEAST64 "I64o"
-#define PRIuLEAST64 "I64u"
-#define PRIxLEAST64 "I64x"
-#define PRIXLEAST64 "I64X"
-#define PRIoFAST64 "I64o"
-#define PRIuFAST64 "I64u"
-#define PRIxFAST64 "I64x"
-#define PRIXFAST64 "I64X"
-
-#define PRIoMAX "I64o"
-#define PRIuMAX "I64u"
-#define PRIxMAX "I64x"
-#define PRIXMAX "I64X"
-
-#define PRIoPTR "Io"
-#define PRIuPTR "Iu"
-#define PRIxPTR "Ix"
-#define PRIXPTR "IX"
-
-// The fscanf macros for signed integers are:
-#define SCNd8 "d"
-#define SCNi8 "i"
-#define SCNdLEAST8 "d"
-#define SCNiLEAST8 "i"
-#define SCNdFAST8 "d"
-#define SCNiFAST8 "i"
-
-#define SCNd16 "hd"
-#define SCNi16 "hi"
-#define SCNdLEAST16 "hd"
-#define SCNiLEAST16 "hi"
-#define SCNdFAST16 "hd"
-#define SCNiFAST16 "hi"
-
-#define SCNd32 "ld"
-#define SCNi32 "li"
-#define SCNdLEAST32 "ld"
-#define SCNiLEAST32 "li"
-#define SCNdFAST32 "ld"
-#define SCNiFAST32 "li"
-
-#define SCNd64 "I64d"
-#define SCNi64 "I64i"
-#define SCNdLEAST64 "I64d"
-#define SCNiLEAST64 "I64i"
-#define SCNdFAST64 "I64d"
-#define SCNiFAST64 "I64i"
-
-#define SCNdMAX "I64d"
-#define SCNiMAX "I64i"
-
-#ifdef _WIN64 // [
-# define SCNdPTR "I64d"
-# define SCNiPTR "I64i"
-#else // _WIN64 ][
-# define SCNdPTR "ld"
-# define SCNiPTR "li"
-#endif // _WIN64 ]
-
-// The fscanf macros for unsigned integers are:
-#define SCNo8 "o"
-#define SCNu8 "u"
-#define SCNx8 "x"
-#define SCNX8 "X"
-#define SCNoLEAST8 "o"
-#define SCNuLEAST8 "u"
-#define SCNxLEAST8 "x"
-#define SCNXLEAST8 "X"
-#define SCNoFAST8 "o"
-#define SCNuFAST8 "u"
-#define SCNxFAST8 "x"
-#define SCNXFAST8 "X"
-
-#define SCNo16 "ho"
-#define SCNu16 "hu"
-#define SCNx16 "hx"
-#define SCNX16 "hX"
-#define SCNoLEAST16 "ho"
-#define SCNuLEAST16 "hu"
-#define SCNxLEAST16 "hx"
-#define SCNXLEAST16 "hX"
-#define SCNoFAST16 "ho"
-#define SCNuFAST16 "hu"
-#define SCNxFAST16 "hx"
-#define SCNXFAST16 "hX"
-
-#define SCNo32 "lo"
-#define SCNu32 "lu"
-#define SCNx32 "lx"
-#define SCNX32 "lX"
-#define SCNoLEAST32 "lo"
-#define SCNuLEAST32 "lu"
-#define SCNxLEAST32 "lx"
-#define SCNXLEAST32 "lX"
-#define SCNoFAST32 "lo"
-#define SCNuFAST32 "lu"
-#define SCNxFAST32 "lx"
-#define SCNXFAST32 "lX"
-
-#define SCNo64 "I64o"
-#define SCNu64 "I64u"
-#define SCNx64 "I64x"
-#define SCNX64 "I64X"
-#define SCNoLEAST64 "I64o"
-#define SCNuLEAST64 "I64u"
-#define SCNxLEAST64 "I64x"
-#define SCNXLEAST64 "I64X"
-#define SCNoFAST64 "I64o"
-#define SCNuFAST64 "I64u"
-#define SCNxFAST64 "I64x"
-#define SCNXFAST64 "I64X"
-
-#define SCNoMAX "I64o"
-#define SCNuMAX "I64u"
-#define SCNxMAX "I64x"
-#define SCNXMAX "I64X"
-
-#ifdef _WIN64 // [
-# define SCNoPTR "I64o"
-# define SCNuPTR "I64u"
-# define SCNxPTR "I64x"
-# define SCNXPTR "I64X"
-#else // _WIN64 ][
-# define SCNoPTR "lo"
-# define SCNuPTR "lu"
-# define SCNxPTR "lx"
-# define SCNXPTR "lX"
-#endif // _WIN64 ]
-
-#endif // __STDC_FORMAT_MACROS ]
-
-// 7.8.2 Functions for greatest-width integer types
-
-// 7.8.2.1 The imaxabs function
-#define imaxabs _abs64
-
-// 7.8.2.2 The imaxdiv function
-
-// This is a modified version of the div() function from Microsoft's div.c found
-// in %MSVC.NET%\crt\src\div.c
-#ifdef STATIC_IMAXDIV // [
-static
-#else // STATIC_IMAXDIV ][
-_inline
-#endif // STATIC_IMAXDIV ]
-imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom)
-{
- imaxdiv_t result;
-
- result.quot = numer / denom;
- result.rem = numer % denom;
-
- if (numer < 0 && result.rem > 0) {
- // did division wrong; must fix up
- ++result.quot;
- result.rem -= denom;
- }
-
- return result;
-}
-
-// 7.8.2.3 The strtoimax and strtoumax functions
-#define strtoimax _strtoi64
-#define strtoumax _strtoui64
-
-// 7.8.2.4 The wcstoimax and wcstoumax functions
-#define wcstoimax _wcstoi64
-#define wcstoumax _wcstoui64
-
-#endif // _MSC_VER >= 1800
-
-#endif // _MSC_INTTYPES_H_ ]
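
This whole shim exists only for pre-2013 MSVC; VC2013 and later ship a conforming <inttypes.h> (the _MSC_VER >= 1800 branch above), which is why the vendored copy can delete it. What the format macros buy, in portable form:

    #include <inttypes.h> /* what the shim above emulated on pre-2013 MSVC */
    #include <stdio.h>

    int main(void) {
        int64_t v = -9000000000;
        /* expands to "I64d" on old MSVC, "lld" elsewhere; the macro hides that */
        printf("v = %" PRId64 "\n", v);
        return 0;
    }
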
diff --git a/src/native/external/rapidjson/msinttypes/stdint.h b/src/native/external/rapidjson/msinttypes/stdint.h
deleted file mode 100644
index 3d4477b9a024..000000000000
--- a/src/native/external/rapidjson/msinttypes/stdint.h
+++ /dev/null
@@ -1,300 +0,0 @@
-// ISO C9x compliant stdint.h for Microsoft Visual Studio
-// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
-//
-// Copyright (c) 2006-2013 Alexander Chemeris
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the product nor the names of its contributors may
-// be used to endorse or promote products derived from this software
-// without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
-// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
-// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
-// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
-// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
-// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-///////////////////////////////////////////////////////////////////////////////
-
-// The above software in this distribution may have been modified by
-// THL A29 Limited ("Tencent Modifications").
-// All Tencent Modifications are Copyright (C) 2015 THL A29 Limited.
-
-#ifndef _MSC_VER // [
-#error "Use this header only with Microsoft Visual C++ compilers!"
-#endif // _MSC_VER ]
-
-#ifndef _MSC_STDINT_H_ // [
-#define _MSC_STDINT_H_
-
-#if _MSC_VER > 1000
-#pragma once
-#endif
-
-// miloyip: Originally Visual Studio 2010 uses its own stdint.h. However it generates warning with INT64_C(), so change to use this file for vs2010.
-#if _MSC_VER >= 1600 // [
-#include <stdint.h>
-
-#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
-
-#undef INT8_C
-#undef INT16_C
-#undef INT32_C
-#undef INT64_C
-#undef UINT8_C
-#undef UINT16_C
-#undef UINT32_C
-#undef UINT64_C
-
-// 7.18.4.1 Macros for minimum-width integer constants
-
-#define INT8_C(val) val##i8
-#define INT16_C(val) val##i16
-#define INT32_C(val) val##i32
-#define INT64_C(val) val##i64
-
-#define UINT8_C(val) val##ui8
-#define UINT16_C(val) val##ui16
-#define UINT32_C(val) val##ui32
-#define UINT64_C(val) val##ui64
-
-// 7.18.4.2 Macros for greatest-width integer constants
-// These #ifndef's are needed to prevent collisions with <boost/cstdint.hpp>.
-// Check out Issue 9 for the details.
-#ifndef INTMAX_C // [
-# define INTMAX_C INT64_C
-#endif // INTMAX_C ]
-#ifndef UINTMAX_C // [
-# define UINTMAX_C UINT64_C
-#endif // UINTMAX_C ]
-
-#endif // __STDC_CONSTANT_MACROS ]
-
-#else // ] _MSC_VER >= 1600 [
-
-#include <limits.h>
-
-// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
-// compiling for ARM we have to wrap <wchar.h> include with 'extern "C++" {}'
-// or compiler would give many errors like this:
-// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
-#if defined(__cplusplus) && !defined(_M_ARM)
-extern "C" {
-#endif
-# include <wchar.h>
-#if defined(__cplusplus) && !defined(_M_ARM)
-}
-#endif
-
-// Define _W64 macros to mark types changing their size, like intptr_t.
-#ifndef _W64
-# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
-# define _W64 __w64
-# else
-# define _W64
-# endif
-#endif
-
-
-// 7.18.1 Integer types
-
-// 7.18.1.1 Exact-width integer types
-
-// Visual Studio 6 and Embedded Visual C++ 4 don't
-// realize that, e.g., char has the same size as __int8,
-// so we give up on __intX for them.
-#if (_MSC_VER < 1300)
- typedef signed char int8_t;
- typedef signed short int16_t;
- typedef signed int int32_t;
- typedef unsigned char uint8_t;
- typedef unsigned short uint16_t;
- typedef unsigned int uint32_t;
-#else
- typedef signed __int8 int8_t;
- typedef signed __int16 int16_t;
- typedef signed __int32 int32_t;
- typedef unsigned __int8 uint8_t;
- typedef unsigned __int16 uint16_t;
- typedef unsigned __int32 uint32_t;
-#endif
-typedef signed __int64 int64_t;
-typedef unsigned __int64 uint64_t;
-
-
-// 7.18.1.2 Minimum-width integer types
-typedef int8_t int_least8_t;
-typedef int16_t int_least16_t;
-typedef int32_t int_least32_t;
-typedef int64_t int_least64_t;
-typedef uint8_t uint_least8_t;
-typedef uint16_t uint_least16_t;
-typedef uint32_t uint_least32_t;
-typedef uint64_t uint_least64_t;
-
-// 7.18.1.3 Fastest minimum-width integer types
-typedef int8_t int_fast8_t;
-typedef int16_t int_fast16_t;
-typedef int32_t int_fast32_t;
-typedef int64_t int_fast64_t;
-typedef uint8_t uint_fast8_t;
-typedef uint16_t uint_fast16_t;
-typedef uint32_t uint_fast32_t;
-typedef uint64_t uint_fast64_t;
-
-// 7.18.1.4 Integer types capable of holding object pointers
-#ifdef _WIN64 // [
- typedef signed __int64 intptr_t;
- typedef unsigned __int64 uintptr_t;
-#else // _WIN64 ][
- typedef _W64 signed int intptr_t;
- typedef _W64 unsigned int uintptr_t;
-#endif // _WIN64 ]
-
-// 7.18.1.5 Greatest-width integer types
-typedef int64_t intmax_t;
-typedef uint64_t uintmax_t;
-
-
-// 7.18.2 Limits of specified-width integer types
-
-#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
-
-// 7.18.2.1 Limits of exact-width integer types
-#define INT8_MIN ((int8_t)_I8_MIN)
-#define INT8_MAX _I8_MAX
-#define INT16_MIN ((int16_t)_I16_MIN)
-#define INT16_MAX _I16_MAX
-#define INT32_MIN ((int32_t)_I32_MIN)
-#define INT32_MAX _I32_MAX
-#define INT64_MIN ((int64_t)_I64_MIN)
-#define INT64_MAX _I64_MAX
-#define UINT8_MAX _UI8_MAX
-#define UINT16_MAX _UI16_MAX
-#define UINT32_MAX _UI32_MAX
-#define UINT64_MAX _UI64_MAX
-
-// 7.18.2.2 Limits of minimum-width integer types
-#define INT_LEAST8_MIN INT8_MIN
-#define INT_LEAST8_MAX INT8_MAX
-#define INT_LEAST16_MIN INT16_MIN
-#define INT_LEAST16_MAX INT16_MAX
-#define INT_LEAST32_MIN INT32_MIN
-#define INT_LEAST32_MAX INT32_MAX
-#define INT_LEAST64_MIN INT64_MIN
-#define INT_LEAST64_MAX INT64_MAX
-#define UINT_LEAST8_MAX UINT8_MAX
-#define UINT_LEAST16_MAX UINT16_MAX
-#define UINT_LEAST32_MAX UINT32_MAX
-#define UINT_LEAST64_MAX UINT64_MAX
-
-// 7.18.2.3 Limits of fastest minimum-width integer types
-#define INT_FAST8_MIN INT8_MIN
-#define INT_FAST8_MAX INT8_MAX
-#define INT_FAST16_MIN INT16_MIN
-#define INT_FAST16_MAX INT16_MAX
-#define INT_FAST32_MIN INT32_MIN
-#define INT_FAST32_MAX INT32_MAX
-#define INT_FAST64_MIN INT64_MIN
-#define INT_FAST64_MAX INT64_MAX
-#define UINT_FAST8_MAX UINT8_MAX
-#define UINT_FAST16_MAX UINT16_MAX
-#define UINT_FAST32_MAX UINT32_MAX
-#define UINT_FAST64_MAX UINT64_MAX
-
-// 7.18.2.4 Limits of integer types capable of holding object pointers
-#ifdef _WIN64 // [
-# define INTPTR_MIN INT64_MIN
-# define INTPTR_MAX INT64_MAX
-# define UINTPTR_MAX UINT64_MAX
-#else // _WIN64 ][
-# define INTPTR_MIN INT32_MIN
-# define INTPTR_MAX INT32_MAX
-# define UINTPTR_MAX UINT32_MAX
-#endif // _WIN64 ]
-
-// 7.18.2.5 Limits of greatest-width integer types
-#define INTMAX_MIN INT64_MIN
-#define INTMAX_MAX INT64_MAX
-#define UINTMAX_MAX UINT64_MAX
-
-// 7.18.3 Limits of other integer types
-
-#ifdef _WIN64 // [
-# define PTRDIFF_MIN _I64_MIN
-# define PTRDIFF_MAX _I64_MAX
-#else // _WIN64 ][
-# define PTRDIFF_MIN _I32_MIN
-# define PTRDIFF_MAX _I32_MAX
-#endif // _WIN64 ]
-
-#define SIG_ATOMIC_MIN INT_MIN
-#define SIG_ATOMIC_MAX INT_MAX
-
-#ifndef SIZE_MAX // [
-# ifdef _WIN64 // [
-# define SIZE_MAX _UI64_MAX
-# else // _WIN64 ][
-# define SIZE_MAX _UI32_MAX
-# endif // _WIN64 ]
-#endif // SIZE_MAX ]
-
-// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
-#ifndef WCHAR_MIN // [
-# define WCHAR_MIN 0
-#endif // WCHAR_MIN ]
-#ifndef WCHAR_MAX // [
-# define WCHAR_MAX _UI16_MAX
-#endif // WCHAR_MAX ]
-
-#define WINT_MIN 0
-#define WINT_MAX _UI16_MAX
-
-#endif // __STDC_LIMIT_MACROS ]
-
-
-// 7.18.4 Limits of other integer types
-
-#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
-
-// 7.18.4.1 Macros for minimum-width integer constants
-
-#define INT8_C(val) val##i8
-#define INT16_C(val) val##i16
-#define INT32_C(val) val##i32
-#define INT64_C(val) val##i64
-
-#define UINT8_C(val) val##ui8
-#define UINT16_C(val) val##ui16
-#define UINT32_C(val) val##ui32
-#define UINT64_C(val) val##ui64
-
-// 7.18.4.2 Macros for greatest-width integer constants
-// These #ifndef's are needed to prevent collisions with <boost/cstdint.hpp>.
-// Check out Issue 9 for the details.
-#ifndef INTMAX_C // [
-# define INTMAX_C INT64_C
-#endif // INTMAX_C ]
-#ifndef UINTMAX_C // [
-# define UINTMAX_C UINT64_C
-#endif // UINTMAX_C ]
-
-#endif // __STDC_CONSTANT_MACROS ]
-
-#endif // _MSC_VER >= 1600 ]
-
-#endif // _MSC_STDINT_H_ ]
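
The stdint.h shim is likewise redundant on VC2010 and later, where the real <stdint.h> is included and only the constant macros are re-pinned. Those macros pick the correct literal suffix per compiler; a sketch (the two-halves trick mirrors the RAPIDJSON_UINT64_C2 usage visible in the strtod.h hunk above):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        /* UINT64_C appends the right suffix per compiler (ui64 on old MSVC,
           ULL with a conforming <stdint.h>) */
        uint64_t big = (UINT64_C(0x19999999) << 32) | UINT64_C(0x99999999);
        printf("%llu\n", (unsigned long long)big); /* 0x1999999999999999 */
        return 0;
    }
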
diff --git a/src/native/external/rapidjson/ostreamwrapper.h b/src/native/external/rapidjson/ostreamwrapper.h
deleted file mode 100644
index 6f4667c08ad7..000000000000
--- a/src/native/external/rapidjson/ostreamwrapper.h
+++ /dev/null
@@ -1,81 +0,0 @@
-// Tencent is pleased to support the open source community by making RapidJSON available.
-//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
-//
-// Licensed under the MIT License (the "License"); you may not use this file except
-// in compliance with the License. You may obtain a copy of the License at
-//
-// http://opensource.org/licenses/MIT
-//
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-#ifndef RAPIDJSON_OSTREAMWRAPPER_H_
-#define RAPIDJSON_OSTREAMWRAPPER_H_
-
-#include "stream.h"
-#include <iosfwd>
-
-#ifdef __clang__
-RAPIDJSON_DIAG_PUSH
-RAPIDJSON_DIAG_OFF(padded)
-#endif
-
-RAPIDJSON_NAMESPACE_BEGIN
-
-//! Wrapper of \c std::basic_ostream into RapidJSON's Stream concept.
-/*!
- Classes that can be wrapped include, but are not limited to:
-
- - \c std::ostringstream
- - \c std::stringstream
- - \c std::wostringstream
- - \c std::wstringstream
- - \c std::ofstream
- - \c std::fstream
- - \c std::wofstream
- - \c std::wfstream
-
- \tparam StreamType Class derived from \c std::basic_ostream.
-*/
-
-template <typename StreamType>
-class BasicOStreamWrapper {
-public:
- typedef typename StreamType::char_type Ch;
- BasicOStreamWrapper(StreamType& stream) : stream_(stream) {}
-
- void Put(Ch c) {
- stream_.put(c);
- }
-
- void Flush() {
- stream_.flush();
- }
-
- // Not implemented
- char Peek() const { RAPIDJSON_ASSERT(false); return 0; }
- char Take() { RAPIDJSON_ASSERT(false); return 0; }
- size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; }
- char* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
- size_t PutEnd(char*) { RAPIDJSON_ASSERT(false); return 0; }
-
-private:
- BasicOStreamWrapper(const BasicOStreamWrapper&);
- BasicOStreamWrapper& operator=(const BasicOStreamWrapper&);
-
- StreamType& stream_;
-};
-
-typedef BasicOStreamWrapper<std::ostream> OStreamWrapper;
-typedef BasicOStreamWrapper<std::wostream> WOStreamWrapper;
-
-#ifdef __clang__
-RAPIDJSON_DIAG_POP
-#endif
-
-RAPIDJSON_NAMESPACE_END
-
-#endif // RAPIDJSON_OSTREAMWRAPPER_H_
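
ostreamwrapper.h is also removed. The wrapper adapts any std::basic_ostream to the write-only half of the Stream concept (Put/Flush); a usage sketch based on the API above, assuming the upstream header:

    #include "rapidjson/ostreamwrapper.h" // upstream header; removed from this vendored copy
    #include <iostream>
    #include <sstream>

    int main() {
        std::ostringstream ss;
        rapidjson::OStreamWrapper osw(ss); // holds the stream by reference; not copyable
        osw.Put('o');
        osw.Put('k');
        osw.Flush();                       // forwards to ss.flush()
        std::cout << ss.str() << "\n";     // prints "ok"
        return 0;
    }
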
diff --git a/src/native/external/rapidjson/pointer.h b/src/native/external/rapidjson/pointer.h
deleted file mode 100644
index 063abab9a170..000000000000
--- a/src/native/external/rapidjson/pointer.h
+++ /dev/null
@@ -1,1414 +0,0 @@
-// Tencent is pleased to support the open source community by making RapidJSON available.
-//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
-//
-// Licensed under the MIT License (the "License"); you may not use this file except
-// in compliance with the License. You may obtain a copy of the License at
-//
-// http://opensource.org/licenses/MIT
-//
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-#ifndef RAPIDJSON_POINTER_H_
-#define RAPIDJSON_POINTER_H_
-
-#include "document.h"
-#include "internal/itoa.h"
-
-#ifdef __clang__
-RAPIDJSON_DIAG_PUSH
-RAPIDJSON_DIAG_OFF(switch-enum)
-#elif defined(_MSC_VER)
-RAPIDJSON_DIAG_PUSH
-RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
-#endif
-
-RAPIDJSON_NAMESPACE_BEGIN
-
-static const SizeType kPointerInvalidIndex = ~SizeType(0); //!< Represents an invalid index in GenericPointer::Token
-
-//! Error code of parsing.
-/*! \ingroup RAPIDJSON_ERRORS
- \see GenericPointer::GenericPointer, GenericPointer::GetParseErrorCode
-*/
-enum PointerParseErrorCode {
- kPointerParseErrorNone = 0, //!< The parse is successful
-
- kPointerParseErrorTokenMustBeginWithSolidus, //!< A token must begin with a '/'
- kPointerParseErrorInvalidEscape, //!< Invalid escape
- kPointerParseErrorInvalidPercentEncoding, //!< Invalid percent encoding in URI fragment
- kPointerParseErrorCharacterMustPercentEncode //!< A character must be percent encoded in URI fragment
-};
-
-///////////////////////////////////////////////////////////////////////////////
-// GenericPointer
-
-//! Represents a JSON Pointer. Use Pointer for UTF8 encoding and default allocator.
-/*!
- This class implements RFC 6901 "JavaScript Object Notation (JSON) Pointer"
- (https://tools.ietf.org/html/rfc6901).
-
- A JSON pointer identifies a specific value in a JSON document
- (GenericDocument). It can simplify coding of DOM tree manipulation, because it
- can access a value multiple levels deep in the DOM tree with a single API call.
-
- After it parses a string representation (e.g. "/foo/0") or URI fragment
- representation (e.g. "#/foo/0") into its internal representation (tokens),
- it can be used to resolve a specific value in multiple documents, or in
- sub-trees of documents.
-
- Contrary to GenericValue, Pointer can be copy constructed and copy assigned.
- Apart from assignment, a Pointer cannot be modified after construction.
-
- Although Pointer is very convenient, please be aware that constructing a Pointer
- involves parsing and dynamic memory allocation. A special constructor with user-
- supplied tokens eliminates these costs.
-
- GenericPointer depends on GenericDocument and GenericValue.
-
- \tparam ValueType The value type of the DOM tree. E.g. GenericValue<UTF8<> >
- \tparam Allocator The allocator type for allocating memory for internal representation.
-
- \note GenericPointer uses the same encoding as ValueType.
- However, Allocator of GenericPointer is independent of Allocator of Value.
-*/
-template <typename ValueType, typename Allocator = CrtAllocator>
-class GenericPointer {
-public:
- typedef typename ValueType::EncodingType EncodingType; //!< Encoding type from Value
- typedef typename ValueType::Ch Ch; //!< Character type from Value
-
- //! A token is the basic unit of internal representation.
- /*!
- A JSON pointer string representation "/foo/123" is parsed to two tokens:
- "foo" and 123. 123 will be represented in both numeric form and string form.
- They are resolved according to the actual value type (object or array).
-
- For tokens that are not numbers, or whose numeric value is out of bound
- (greater than the limit of SizeType), only the string form is used
- (i.e. the token's index will be equal to kPointerInvalidIndex).
-
- This struct is public so that users can create a Pointer without parsing and
- allocation, using a special constructor.
- */
- struct Token {
- const Ch* name; //!< Name of the token. It has a null character at the end, but it can also contain null characters.
- SizeType length; //!< Length of the name.
- SizeType index; //!< A valid array index, if it is not equal to kPointerInvalidIndex.
- };
-
- //!@name Constructors and destructor.
- //@{
-
- //! Default constructor.
- GenericPointer(Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {}
-
- //! Constructor that parses a string or URI fragment representation.
- /*!
- \param source A null-terminated string or URI fragment representation of JSON pointer.
- \param allocator User supplied allocator for this pointer. If no allocator is provided, it creates a self-owned one.
- */
- explicit GenericPointer(const Ch* source, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {
- Parse(source, internal::StrLen(source));
- }
-
-#if RAPIDJSON_HAS_STDSTRING
- //! Constructor that parses a string or URI fragment representation.
- /*!
- \param source A string or URI fragment representation of JSON pointer.
- \param allocator User supplied allocator for this pointer. If no allocator is provided, it creates a self-owned one.
- \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING.
- */
- explicit GenericPointer(const std::basic_string<Ch>& source, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {
- Parse(source.c_str(), source.size());
- }
-#endif
-
- //! Constructor that parses a string or URI fragment representation, with length of the source string.
- /*!
- \param source A string or URI fragment representation of JSON pointer.
- \param length Length of source.
- \param allocator User supplied allocator for this pointer. If no allocator is provided, it creates a self-owned one.
- \note Slightly faster than the overload without length.
- */
- GenericPointer(const Ch* source, size_t length, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {
- Parse(source, length);
- }
-
- //! Constructor with user-supplied tokens.
- /*!
- This constructor lets the user supply a constant array of tokens.
- It avoids the parsing process and eliminates allocation.
- This is preferred for memory-constrained environments.
-
- \param tokens A constant array of tokens representing the JSON pointer.
- \param tokenCount Number of tokens.
-
- \b Example
- \code
- #define NAME(s) { s, sizeof(s) / sizeof(s[0]) - 1, kPointerInvalidIndex }
- #define INDEX(i) { #i, sizeof(#i) - 1, i }
-
- static const Pointer::Token kTokens[] = { NAME("foo"), INDEX(123) };
- static const Pointer p(kTokens, sizeof(kTokens) / sizeof(kTokens[0]));
- // Equivalent to static const Pointer p("/foo/123");
-
- #undef NAME
- #undef INDEX
- \endcode
- */
- GenericPointer(const Token* tokens, size_t tokenCount) : allocator_(), ownAllocator_(), nameBuffer_(), tokens_(const_cast<Token*>(tokens)), tokenCount_(tokenCount), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {}
-
- //! Copy constructor.
- GenericPointer(const GenericPointer& rhs) : allocator_(rhs.allocator_), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {
- *this = rhs;
- }
-
- //! Copy constructor.
- GenericPointer(const GenericPointer& rhs, Allocator* allocator) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {
- *this = rhs;
- }
-
- //! Destructor.
- ~GenericPointer() {
- if (nameBuffer_) // If user-supplied tokens constructor is used, nameBuffer_ is nullptr and tokens_ are not deallocated.
- Allocator::Free(tokens_);
- RAPIDJSON_DELETE(ownAllocator_);
- }
-
- //! Assignment operator.
- GenericPointer& operator=(const GenericPointer& rhs) {
- if (this != &rhs) {
- // Do not delete ownAllocator_
- if (nameBuffer_)
- Allocator::Free(tokens_);
-
- tokenCount_ = rhs.tokenCount_;
- parseErrorOffset_ = rhs.parseErrorOffset_;
- parseErrorCode_ = rhs.parseErrorCode_;
-
- if (rhs.nameBuffer_)
- CopyFromRaw(rhs); // Normally parsed tokens.
- else {
- tokens_ = rhs.tokens_; // User supplied const tokens.
- nameBuffer_ = 0;
- }
- }
- return *this;
- }
-
- //! Swap the content of this pointer with another.
- /*!
- \param other The pointer to swap with.
- \note Constant complexity.
- */
- GenericPointer& Swap(GenericPointer& other) RAPIDJSON_NOEXCEPT {
- internal::Swap(allocator_, other.allocator_);
- internal::Swap(ownAllocator_, other.ownAllocator_);
- internal::Swap(nameBuffer_, other.nameBuffer_);
- internal::Swap(tokens_, other.tokens_);
- internal::Swap(tokenCount_, other.tokenCount_);
- internal::Swap(parseErrorOffset_, other.parseErrorOffset_);
- internal::Swap(parseErrorCode_, other.parseErrorCode_);
- return *this;
- }
-
- //! free-standing swap function helper
- /*!
- Helper function to enable support for common swap implementation pattern based on \c std::swap:
- \code
- void swap(MyClass& a, MyClass& b) {
- using std::swap;
- swap(a.pointer, b.pointer);
- // ...
- }
- \endcode
- \see Swap()
- */
- friend inline void swap(GenericPointer& a, GenericPointer& b) RAPIDJSON_NOEXCEPT { a.Swap(b); }
-
- //@}
-
- //!@name Append token
- //@{
-
- //! Append a token and return a new Pointer
- /*!
- \param token Token to be appended.
- \param allocator Allocator for the newly returned Pointer.
- \return A new Pointer with appended token.
- */
- GenericPointer Append(const Token& token, Allocator* allocator = 0) const {
- GenericPointer r;
- r.allocator_ = allocator;
- Ch *p = r.CopyFromRaw(*this, 1, token.length + 1);
- std::memcpy(p, token.name, (token.length + 1) * sizeof(Ch));
- r.tokens_[tokenCount_].name = p;
- r.tokens_[tokenCount_].length = token.length;
- r.tokens_[tokenCount_].index = token.index;
- return r;
- }
-
- //! Append a name token with length, and return a new Pointer
- /*!
- \param name Name to be appended.
- \param length Length of name.
- \param allocator Allocator for the newly returned Pointer.
- \return A new Pointer with appended token.
- */
- GenericPointer Append(const Ch* name, SizeType length, Allocator* allocator = 0) const {
- Token token = { name, length, kPointerInvalidIndex };
- return Append(token, allocator);
- }
-
- //! Append a name token without length, and return a new Pointer
- /*!
- \param name Name (const Ch*) to be appended.
- \param allocator Allocator for the newly returned Pointer.
- \return A new Pointer with appended token.
- */
- template <typename T>
- RAPIDJSON_DISABLEIF_RETURN((internal::NotExpr<internal::IsSame<typename internal::RemoveConst<T>::Type, Ch> >), (GenericPointer))
- Append(T* name, Allocator* allocator = 0) const {
- return Append(name, internal::StrLen(name), allocator);
- }
-
-#if RAPIDJSON_HAS_STDSTRING
- //! Append a name token, and return a new Pointer
- /*!
- \param name Name to be appended.
- \param allocator Allocator for the newly returned Pointer.
- \return A new Pointer with appended token.
- */
- GenericPointer Append(const std::basic_string<Ch>& name, Allocator* allocator = 0) const {
- return Append(name.c_str(), static_cast<SizeType>(name.size()), allocator);
- }
-#endif
-
- //! Append an index token, and return a new Pointer
- /*!
- \param index Index to be appended.
- \param allocator Allocator for the newly returned Pointer.
- \return A new Pointer with appended token.
- */
- GenericPointer Append(SizeType index, Allocator* allocator = 0) const {
- char buffer[21];
- char* end = sizeof(SizeType) == 4 ? internal::u32toa(index, buffer) : internal::u64toa(index, buffer);
- SizeType length = static_cast<SizeType>(end - buffer);
- buffer[length] = '\0';
-
- if (sizeof(Ch) == 1) {
- Token token = { reinterpret_cast<Ch*>(buffer), length, index };
- return Append(token, allocator);
- }
- else {
- Ch name[21];
- for (size_t i = 0; i <= length; i++)
- name[i] = static_cast<Ch>(buffer[i]);
- Token token = { name, length, index };
- return Append(token, allocator);
- }
- }
-
- //! Append a token by value, and return a new Pointer
- /*!
- \param token token to be appended.
- \param allocator Allocator for the newly returned Pointer.
- \return A new Pointer with appended token.
- */
- GenericPointer Append(const ValueType& token, Allocator* allocator = 0) const {
- if (token.IsString())
- return Append(token.GetString(), token.GetStringLength(), allocator);
- else {
- RAPIDJSON_ASSERT(token.IsUint64());
- RAPIDJSON_ASSERT(token.GetUint64() <= SizeType(~0));
- return Append(static_cast<SizeType>(token.GetUint64()), allocator);
- }
- }
-
- //!@name Handling Parse Error
- //@{
-
- //! Check whether this is a valid pointer.
- bool IsValid() const { return parseErrorCode_ == kPointerParseErrorNone; }
-
- //! Get the parsing error offset in code unit.
- size_t GetParseErrorOffset() const { return parseErrorOffset_; }
-
- //! Get the parsing error code.
- PointerParseErrorCode GetParseErrorCode() const { return parseErrorCode_; }
-
- //@}
-
- //! Get the allocator of this pointer.
- Allocator& GetAllocator() { return *allocator_; }
-
- //!@name Tokens
- //@{
-
- //! Get the token array (const version only).
- const Token* GetTokens() const { return tokens_; }
-
- //! Get the number of tokens.
- size_t GetTokenCount() const { return tokenCount_; }
-
- //@}
-
- //!@name Equality/inequality operators
- //@{
-
- //! Equality operator.
- /*!
- \note When either pointer is invalid, this always returns false.
- */
- bool operator==(const GenericPointer& rhs) const {
- if (!IsValid() || !rhs.IsValid() || tokenCount_ != rhs.tokenCount_)
- return false;
-
- for (size_t i = 0; i < tokenCount_; i++) {
- if (tokens_[i].index != rhs.tokens_[i].index ||
- tokens_[i].length != rhs.tokens_[i].length ||
- (tokens_[i].length != 0 && std::memcmp(tokens_[i].name, rhs.tokens_[i].name, sizeof(Ch)* tokens_[i].length) != 0))
- {
- return false;
- }
- }
-
- return true;
- }
-
- //! Inequality operator.
- /*!
- \note When either pointer is invalid, this always returns true.
- */
- bool operator!=(const GenericPointer& rhs) const { return !(*this == rhs); }
-
- //! Less than operator.
- /*!
- \note Invalid pointers are always greater than valid ones.
- */
- bool operator<(const GenericPointer& rhs) const {
- if (!IsValid())
- return false;
- if (!rhs.IsValid())
- return true;
-
- if (tokenCount_ != rhs.tokenCount_)
- return tokenCount_ < rhs.tokenCount_;
-
- for (size_t i = 0; i < tokenCount_; i++) {
- if (tokens_[i].index != rhs.tokens_[i].index)
- return tokens_[i].index < rhs.tokens_[i].index;
-
- if (tokens_[i].length != rhs.tokens_[i].length)
- return tokens_[i].length < rhs.tokens_[i].length;
-
- if (int cmp = std::memcmp(tokens_[i].name, rhs.tokens_[i].name, sizeof(Ch) * tokens_[i].length))
- return cmp < 0;
- }
-
- return false;
- }
-
- //@}
-
- //!@name Stringify
- //@{
-
- //! Stringify the pointer into string representation.
- /*!
- \tparam OutputStream Type of output stream.
- \param os The output stream.
- */
- template<typename OutputStream>
- bool Stringify(OutputStream& os) const {
- return Stringify<false, OutputStream>(os);
- }
-
- //! Stringify the pointer into URI fragment representation.
- /*!
- \tparam OutputStream Type of output stream.
- \param os The output stream.
- */
- template<typename OutputStream>
- bool StringifyUriFragment(OutputStream& os) const {
- return Stringify<true, OutputStream>(os);
- }
-
- //@}
-
- //!@name Create value
- //@{
-
- //! Create a value in a subtree.
- /*!
- If the value does not exist, it creates all parent values and a JSON Null value,
- so it always succeeds and returns the newly created or existing value.
-
- Note that it may change the types of parents according to the tokens, so it
- potentially removes previously stored values. For example, if a document
- was an array, and "/foo" is used to create a value, then the document
- will be changed to an object, and all existing array elements are lost.
-
- \param root Root value of a DOM subtree to be resolved. It can be any value other than the document root.
- \param allocator Allocator for creating the values if the specified value or its parents do not exist.
- \param alreadyExist If non-null, it stores whether the resolved value already existed.
- \return The resolved value: newly created (a JSON Null value) or already existing.
- */
- ValueType& Create(ValueType& root, typename ValueType::AllocatorType& allocator, bool* alreadyExist = 0) const {
- RAPIDJSON_ASSERT(IsValid());
- ValueType* v = &root;
- bool exist = true;
- for (const Token *t = tokens_; t != tokens_ + tokenCount_; ++t) {
- if (v->IsArray() && t->name[0] == '-' && t->length == 1) {
- v->PushBack(ValueType().Move(), allocator);
- v = &((*v)[v->Size() - 1]);
- exist = false;
- }
- else {
- if (t->index == kPointerInvalidIndex) { // must be object name
- if (!v->IsObject())
- v->SetObject(); // Change to Object
- }
- else { // object name or array index
- if (!v->IsArray() && !v->IsObject())
- v->SetArray(); // Change to Array
- }
-
- if (v->IsArray()) {
- if (t->index >= v->Size()) {
- v->Reserve(t->index + 1, allocator);
- while (t->index >= v->Size())
- v->PushBack(ValueType().Move(), allocator);
- exist = false;
- }
- v = &((*v)[t->index]);
- }
- else {
- typename ValueType::MemberIterator m = v->FindMember(GenericStringRef<Ch>(t->name, t->length));
- if (m == v->MemberEnd()) {
- v->AddMember(ValueType(t->name, t->length, allocator).Move(), ValueType().Move(), allocator);
- v = &(--v->MemberEnd())->value; // Assumes AddMember() appends at the end
- exist = false;
- }
- else
- v = &m->value;
- }
- }
- }
-
- if (alreadyExist)
- *alreadyExist = exist;
-
- return *v;
- }
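
Create() walks the pointer token by token, materializing missing parents and coercing their types as documented above, with "-" appending to arrays per RFC 6901. A usage sketch (pointer.h is being removed from this vendored copy, so this reflects the upstream API only):

    #include "rapidjson/document.h"
    #include "rapidjson/pointer.h" // upstream header; removed from this vendored copy
    #include <cassert>

    int main() {
        rapidjson::Document d;
        d.SetObject();

        bool existed = true;
        // One call builds {"foo":[null]}: "foo" forces an object member,
        // "0" forces an array element, and the leaf starts as JSON null.
        rapidjson::Value& v =
            rapidjson::Pointer("/foo/0").Create(d, d.GetAllocator(), &existed);
        assert(!existed && v.IsNull());

        v.SetInt(42);
        // "-" appends past the end of the array: {"foo":[42,null]}
        rapidjson::Pointer("/foo/-").Create(d, d.GetAllocator());
        return 0;
    }
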
-
- //! Creates a value in a document.
- /*!
- \param document A document to be resolved.
-        \param alreadyExist If non-null, it stores whether the resolved value already exists.
-        \return The newly created or already existing value.
- */
- template <typename stackAllocator>
- ValueType& Create(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, bool* alreadyExist = 0) const {
- return Create(document, document.GetAllocator(), alreadyExist);
- }
-
- //@}
-
- //!@name Query value
- //@{
-
- //! Query a value in a subtree.
- /*!
-        \param root Root value of a DOM sub-tree to be resolved. It can be any value other than the document root.
-        \param unresolvedTokenIndex If the pointer cannot resolve a token in the pointer, this parameter receives the index of the unresolved token.
- \return Pointer to the value if it can be resolved. Otherwise null.
-
- \note
-        There are only 3 situations in which a value cannot be resolved:
-        1. A value in the path is neither an array nor an object.
- 2. An object value does not contain the token.
- 3. A token is out of range of an array value.
-
- Use unresolvedTokenIndex to retrieve the token index.
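-
-        A usage sketch (illustrative only; assumes a \c Document named \c document):
-        \code
-        size_t unresolved;
-        if (Value* v = Pointer("/foo/0").Get(document, &unresolved))
-            v->SetInt(42);  // resolved: mutate in place
-        // on failure, 'unresolved' holds the index of the failing token
-        \endcode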
- */
- ValueType* Get(ValueType& root, size_t* unresolvedTokenIndex = 0) const {
- RAPIDJSON_ASSERT(IsValid());
- ValueType* v = &root;
- for (const Token *t = tokens_; t != tokens_ + tokenCount_; ++t) {
- switch (v->GetType()) {
- case kObjectType:
- {
- typename ValueType::MemberIterator m = v->FindMember(GenericStringRef<Ch>(t->name, t->length));
- if (m == v->MemberEnd())
- break;
- v = &m->value;
- }
- continue;
- case kArrayType:
- if (t->index == kPointerInvalidIndex || t->index >= v->Size())
- break;
- v = &((*v)[t->index]);
- continue;
- default:
- break;
- }
-
- // Error: unresolved token
- if (unresolvedTokenIndex)
- *unresolvedTokenIndex = static_cast<size_t>(t - tokens_);
- return 0;
- }
- return v;
- }
-
- //! Query a const value in a const subtree.
- /*!
-        \param root Root value of a DOM sub-tree to be resolved. It can be any value other than the document root.
- \return Pointer to the value if it can be resolved. Otherwise null.
- */
- const ValueType* Get(const ValueType& root, size_t* unresolvedTokenIndex = 0) const {
- return Get(const_cast<ValueType&>(root), unresolvedTokenIndex);
- }
-
- //@}
-
- //!@name Query a value with default
- //@{
-
- //! Query a value in a subtree with default value.
- /*!
-        Similar to Get(), but if the specified value does not exist, it creates all parents and clones the default value,
-        so this function always succeeds.
-
-        \param root Root value of a DOM sub-tree to be resolved. It can be any value other than the document root.
-        \param defaultValue Default value to be cloned if the value does not exist.
-        \param allocator Allocator for creating the values if the specified value or its parents do not exist.
- \see Create()
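-
-        A usage sketch (illustrative only; assumes a \c Document named \c d):
-        \code
-        // Returns the existing "/port" value, or creates it with default 8080.
-        Value& port = Pointer("/port").GetWithDefault(d, 8080, d.GetAllocator());
-        \endcode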
- */
- ValueType& GetWithDefault(ValueType& root, const ValueType& defaultValue, typename ValueType::AllocatorType& allocator) const {
- bool alreadyExist;
- ValueType& v = Create(root, allocator, &alreadyExist);
- return alreadyExist ? v : v.CopyFrom(defaultValue, allocator);
- }
-
- //! Query a value in a subtree with default null-terminated string.
- ValueType& GetWithDefault(ValueType& root, const Ch* defaultValue, typename ValueType::AllocatorType& allocator) const {
- bool alreadyExist;
- ValueType& v = Create(root, allocator, &alreadyExist);
- return alreadyExist ? v : v.SetString(defaultValue, allocator);
- }
-
-#if RAPIDJSON_HAS_STDSTRING
- //! Query a value in a subtree with default std::basic_string.
- ValueType& GetWithDefault(ValueType& root, const std::basic_string<Ch>& defaultValue, typename ValueType::AllocatorType& allocator) const {
- bool alreadyExist;
- ValueType& v = Create(root, allocator, &alreadyExist);
- return alreadyExist ? v : v.SetString(defaultValue, allocator);
- }
-#endif
-
- //! Query a value in a subtree with default primitive value.
- /*!
- \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool
- */
- template <typename T>
- RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (ValueType&))
- GetWithDefault(ValueType& root, T defaultValue, typename ValueType::AllocatorType& allocator) const {
- return GetWithDefault(root, ValueType(defaultValue).Move(), allocator);
- }
-
- //! Query a value in a document with default value.
- template <typename stackAllocator>
- ValueType& GetWithDefault(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const ValueType& defaultValue) const {
- return GetWithDefault(document, defaultValue, document.GetAllocator());
- }
-
- //! Query a value in a document with default null-terminated string.
- template <typename stackAllocator>
- ValueType& GetWithDefault(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const Ch* defaultValue) const {
- return GetWithDefault(document, defaultValue, document.GetAllocator());
- }
-
-#if RAPIDJSON_HAS_STDSTRING
- //! Query a value in a document with default std::basic_string.
- template <typename stackAllocator>
- ValueType& GetWithDefault(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const std::basic_string<Ch>& defaultValue) const {
- return GetWithDefault(document, defaultValue, document.GetAllocator());
- }
-#endif
-
- //! Query a value in a document with default primitive value.
- /*!
- \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool
- */
- template <typename T, typename stackAllocator>
- RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (ValueType&))
- GetWithDefault(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, T defaultValue) const {
- return GetWithDefault(document, defaultValue, document.GetAllocator());
- }
-
- //@}
-
- //!@name Set a value
- //@{
-
- //! Set a value in a subtree, with move semantics.
- /*!
-        It creates all parents if they do not exist or their types do not match the tokens,
-        so this function always succeeds but may remove existing values.
-
-        \param root Root value of a DOM sub-tree to be resolved. It can be any value other than the document root.
-        \param value Value to be set.
-        \param allocator Allocator for creating the values if the specified value or its parents do not exist.
- \see Create()
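-
-        A usage sketch (illustrative only; assumes a \c Document named \c d):
-        \code
-        // Equivalent to d["project"] = "rapidjson", creating parents as needed.
-        Pointer("/project").Set(d, "rapidjson", d.GetAllocator());
-        \endcode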
- */
- ValueType& Set(ValueType& root, ValueType& value, typename ValueType::AllocatorType& allocator) const {
- return Create(root, allocator) = value;
- }
-
- //! Set a value in a subtree, with copy semantics.
- ValueType& Set(ValueType& root, const ValueType& value, typename ValueType::AllocatorType& allocator) const {
- return Create(root, allocator).CopyFrom(value, allocator);
- }
-
- //! Set a null-terminated string in a subtree.
- ValueType& Set(ValueType& root, const Ch* value, typename ValueType::AllocatorType& allocator) const {
- return Create(root, allocator) = ValueType(value, allocator).Move();
- }
-
-#if RAPIDJSON_HAS_STDSTRING
- //! Set a std::basic_string in a subtree.
- ValueType& Set(ValueType& root, const std::basic_string<Ch>& value, typename ValueType::AllocatorType& allocator) const {
- return Create(root, allocator) = ValueType(value, allocator).Move();
- }
-#endif
-
- //! Set a primitive value in a subtree.
- /*!
- \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool
- */
- template <typename T>
- RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (ValueType&))
- Set(ValueType& root, T value, typename ValueType::AllocatorType& allocator) const {
- return Create(root, allocator) = ValueType(value).Move();
- }
-
- //! Set a value in a document, with move semantics.
- template <typename stackAllocator>
- ValueType& Set(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, ValueType& value) const {
- return Create(document) = value;
- }
-
- //! Set a value in a document, with copy semantics.
- template <typename stackAllocator>
- ValueType& Set(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const ValueType& value) const {
- return Create(document).CopyFrom(value, document.GetAllocator());
- }
-
- //! Set a null-terminated string in a document.
- template <typename stackAllocator>
- ValueType& Set(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const Ch* value) const {
- return Create(document) = ValueType(value, document.GetAllocator()).Move();
- }
-
-#if RAPIDJSON_HAS_STDSTRING
- //! Sets a std::basic_string in a document.
- template <typename stackAllocator>
- ValueType& Set(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const std::basic_string<Ch>& value) const {
- return Create(document) = ValueType(value, document.GetAllocator()).Move();
- }
-#endif
-
- //! Set a primitive value in a document.
- /*!
- \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool
- */
- template <typename T, typename stackAllocator>
- RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (ValueType&))
- Set(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, T value) const {
- return Create(document) = value;
- }
-
- //@}
-
- //!@name Swap a value
- //@{
-
- //! Swap a value with a value in a subtree.
- /*!
-        It creates all parents if they do not exist or their types do not match the tokens,
-        so this function always succeeds but may remove existing values.
-
-        \param root Root value of a DOM sub-tree to be resolved. It can be any value other than the document root.
-        \param value Value to be swapped.
-        \param allocator Allocator for creating the values if the specified value or its parents do not exist.
- \see Create()
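-
-        A usage sketch (illustrative only; assumes a \c Document named \c d):
-        \code
-        Value tmp(kArrayType);
-        Pointer("/data").Swap(d, tmp, d.GetAllocator()); // tmp now holds the old "/data" value
-        \endcode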
- */
- ValueType& Swap(ValueType& root, ValueType& value, typename ValueType::AllocatorType& allocator) const {
- return Create(root, allocator).Swap(value);
- }
-
- //! Swap a value with a value in a document.
- template <typename stackAllocator>
- ValueType& Swap(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, ValueType& value) const {
- return Create(document).Swap(value);
- }
-
- //@}
-
- //! Erase a value in a subtree.
- /*!
-        \param root Root value of a DOM sub-tree to be resolved. It can be any value other than the document root.
-        \return Whether the resolved value is found and erased.
-
-        \note Erasing with an empty pointer \c Pointer(""), i.e. the root, always fails and returns \c false.
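-
-        A usage sketch (illustrative only; assumes a \c Document named \c d):
-        \code
-        bool erased = Pointer("/foo/0").Erase(d); // false if "/foo/0" cannot be resolved
-        \endcode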
- */
- bool Erase(ValueType& root) const {
- RAPIDJSON_ASSERT(IsValid());
- if (tokenCount_ == 0) // Cannot erase the root
- return false;
-
- ValueType* v = &root;
- const Token* last = tokens_ + (tokenCount_ - 1);
- for (const Token *t = tokens_; t != last; ++t) {
- switch (v->GetType()) {
- case kObjectType:
- {
- typename ValueType::MemberIterator m = v->FindMember(GenericStringRef<Ch>(t->name, t->length));
- if (m == v->MemberEnd())
- return false;
- v = &m->value;
- }
- break;
- case kArrayType:
- if (t->index == kPointerInvalidIndex || t->index >= v->Size())
- return false;
- v = &((*v)[t->index]);
- break;
- default:
- return false;
- }
- }
-
- switch (v->GetType()) {
- case kObjectType:
- return v->EraseMember(GenericStringRef<Ch>(last->name, last->length));
- case kArrayType:
- if (last->index == kPointerInvalidIndex || last->index >= v->Size())
- return false;
- v->Erase(v->Begin() + last->index);
- return true;
- default:
- return false;
- }
- }
-
-private:
- //! Clone the content from rhs to this.
- /*!
- \param rhs Source pointer.
- \param extraToken Extra tokens to be allocated.
- \param extraNameBufferSize Extra name buffer size (in number of Ch) to be allocated.
- \return Start of non-occupied name buffer, for storing extra names.
- */
- Ch* CopyFromRaw(const GenericPointer& rhs, size_t extraToken = 0, size_t extraNameBufferSize = 0) {
-        if (!allocator_) // no user-supplied allocator: create one below and own it.
- ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)();
-
- size_t nameBufferSize = rhs.tokenCount_; // null terminators for tokens
- for (Token *t = rhs.tokens_; t != rhs.tokens_ + rhs.tokenCount_; ++t)
- nameBufferSize += t->length;
-
- tokenCount_ = rhs.tokenCount_ + extraToken;
- tokens_ = static_cast<Token *>(allocator_->Malloc(tokenCount_ * sizeof(Token) + (nameBufferSize + extraNameBufferSize) * sizeof(Ch)));
- nameBuffer_ = reinterpret_cast<Ch *>(tokens_ + tokenCount_);
- if (rhs.tokenCount_ > 0) {
- std::memcpy(tokens_, rhs.tokens_, rhs.tokenCount_ * sizeof(Token));
- }
- if (nameBufferSize > 0) {
- std::memcpy(nameBuffer_, rhs.nameBuffer_, nameBufferSize * sizeof(Ch));
- }
-
- // Adjust pointers to name buffer
- std::ptrdiff_t diff = nameBuffer_ - rhs.nameBuffer_;
- for (Token *t = tokens_; t != tokens_ + rhs.tokenCount_; ++t)
- t->name += diff;
-
- return nameBuffer_ + nameBufferSize;
- }
-
- //! Check whether a character should be percent-encoded.
- /*!
- According to RFC 3986 2.3 Unreserved Characters.
- \param c The character (code unit) to be tested.
- */
- bool NeedPercentEncode(Ch c) const {
- return !((c >= '0' && c <= '9') || (c >= 'A' && c <='Z') || (c >= 'a' && c <= 'z') || c == '-' || c == '.' || c == '_' || c =='~');
- }
-
- //! Parse a JSON String or its URI fragment representation into tokens.
-#ifndef __clang__ // -Wdocumentation
- /*!
-        \param source Either a JSON Pointer string, or its URI fragment representation. It need not be null-terminated.
-        \param length Length of the source string.
-        \note The source cannot be the JSON String representation of a JSON Pointer; e.g. in "/\u0000", \u0000 will not be unescaped.
- */
-#endif
- void Parse(const Ch* source, size_t length) {
- RAPIDJSON_ASSERT(source != NULL);
- RAPIDJSON_ASSERT(nameBuffer_ == 0);
- RAPIDJSON_ASSERT(tokens_ == 0);
-
-        // Create our own allocator if the user did not supply one.
- if (!allocator_)
- ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)();
-
- // Count number of '/' as tokenCount
- tokenCount_ = 0;
- for (const Ch* s = source; s != source + length; s++)
- if (*s == '/')
- tokenCount_++;
-
- Token* token = tokens_ = static_cast<Token *>(allocator_->Malloc(tokenCount_ * sizeof(Token) + length * sizeof(Ch)));
- Ch* name = nameBuffer_ = reinterpret_cast<Ch *>(tokens_ + tokenCount_);
- size_t i = 0;
-
- // Detect if it is a URI fragment
- bool uriFragment = false;
- if (source[i] == '#') {
- uriFragment = true;
- i++;
- }
-
- if (i != length && source[i] != '/') {
- parseErrorCode_ = kPointerParseErrorTokenMustBeginWithSolidus;
- goto error;
- }
-
- while (i < length) {
- RAPIDJSON_ASSERT(source[i] == '/');
- i++; // consumes '/'
-
- token->name = name;
- bool isNumber = true;
-
- while (i < length && source[i] != '/') {
- Ch c = source[i];
- if (uriFragment) {
- // Decoding percent-encoding for URI fragment
- if (c == '%') {
- PercentDecodeStream is(&source[i], source + length);
- GenericInsituStringStream<EncodingType> os(name);
- Ch* begin = os.PutBegin();
- if (!Transcoder<UTF8<>, EncodingType>().Validate(is, os) || !is.IsValid()) {
- parseErrorCode_ = kPointerParseErrorInvalidPercentEncoding;
- goto error;
- }
- size_t len = os.PutEnd(begin);
- i += is.Tell() - 1;
- if (len == 1)
- c = *name;
- else {
- name += len;
- isNumber = false;
- i++;
- continue;
- }
- }
- else if (NeedPercentEncode(c)) {
- parseErrorCode_ = kPointerParseErrorCharacterMustPercentEncode;
- goto error;
- }
- }
-
- i++;
-
-            // Unescape "~0" -> '~' and "~1" -> '/'
- if (c == '~') {
- if (i < length) {
- c = source[i];
- if (c == '0') c = '~';
- else if (c == '1') c = '/';
- else {
- parseErrorCode_ = kPointerParseErrorInvalidEscape;
- goto error;
- }
- i++;
- }
- else {
- parseErrorCode_ = kPointerParseErrorInvalidEscape;
- goto error;
- }
- }
-
-            // First check for index: all characters are digits
- if (c < '0' || c > '9')
- isNumber = false;
-
- *name++ = c;
- }
- token->length = static_cast<SizeType>(name - token->name);
- if (token->length == 0)
- isNumber = false;
- *name++ = '\0'; // Null terminator
-
-            // Second check for index: a multi-digit index cannot have a leading zero
- if (isNumber && token->length > 1 && token->name[0] == '0')
- isNumber = false;
-
- // String to SizeType conversion
- SizeType n = 0;
- if (isNumber) {
- for (size_t j = 0; j < token->length; j++) {
- SizeType m = n * 10 + static_cast<SizeType>(token->name[j] - '0');
- if (m < n) { // overflow detection
- isNumber = false;
- break;
- }
- n = m;
- }
- }
-
- token->index = isNumber ? n : kPointerInvalidIndex;
- token++;
- }
-
- RAPIDJSON_ASSERT(name <= nameBuffer_ + length); // Should not overflow buffer
- parseErrorCode_ = kPointerParseErrorNone;
- return;
-
- error:
- Allocator::Free(tokens_);
- nameBuffer_ = 0;
- tokens_ = 0;
- tokenCount_ = 0;
- parseErrorOffset_ = i;
- return;
- }
-
- //! Stringify to string or URI fragment representation.
- /*!
- \tparam uriFragment True for stringifying to URI fragment representation. False for string representation.
- \tparam OutputStream type of output stream.
- \param os The output stream.
- */
- template<bool uriFragment, typename OutputStream>
- bool Stringify(OutputStream& os) const {
- RAPIDJSON_ASSERT(IsValid());
-
- if (uriFragment)
- os.Put('#');
-
- for (Token *t = tokens_; t != tokens_ + tokenCount_; ++t) {
- os.Put('/');
- for (size_t j = 0; j < t->length; j++) {
- Ch c = t->name[j];
- if (c == '~') {
- os.Put('~');
- os.Put('0');
- }
- else if (c == '/') {
- os.Put('~');
- os.Put('1');
- }
- else if (uriFragment && NeedPercentEncode(c)) {
- // Transcode to UTF8 sequence
- GenericStringStream<typename ValueType::EncodingType> source(&t->name[j]);
- PercentEncodeStream<OutputStream> target(os);
- if (!Transcoder<EncodingType, UTF8<> >().Validate(source, target))
- return false;
- j += source.Tell() - 1;
- }
- else
- os.Put(c);
- }
- }
- return true;
- }
-
-    //! A helper stream for decoding a percent-encoded sequence into a code unit.
-    /*!
-        This stream decodes a %XY triplet into a code unit (0-255); e.g. "%41" decodes to 0x41 ('A').
-        If it encounters an invalid character, it sets the output code unit to 0,
-        marks the stream invalid, and this can be checked by IsValid().
- */
- class PercentDecodeStream {
- public:
- typedef typename ValueType::Ch Ch;
-
- //! Constructor
- /*!
- \param source Start of the stream
- \param end Past-the-end of the stream.
- */
- PercentDecodeStream(const Ch* source, const Ch* end) : src_(source), head_(source), end_(end), valid_(true) {}
-
- Ch Take() {
- if (*src_ != '%' || src_ + 3 > end_) { // %XY triplet
- valid_ = false;
- return 0;
- }
- src_++;
- Ch c = 0;
- for (int j = 0; j < 2; j++) {
- c = static_cast<Ch>(c << 4);
- Ch h = *src_;
- if (h >= '0' && h <= '9') c = static_cast<Ch>(c + h - '0');
- else if (h >= 'A' && h <= 'F') c = static_cast<Ch>(c + h - 'A' + 10);
- else if (h >= 'a' && h <= 'f') c = static_cast<Ch>(c + h - 'a' + 10);
- else {
- valid_ = false;
- return 0;
- }
- src_++;
- }
- return c;
- }
-
- size_t Tell() const { return static_cast<size_t>(src_ - head_); }
- bool IsValid() const { return valid_; }
-
- private:
- const Ch* src_; //!< Current read position.
- const Ch* head_; //!< Original head of the string.
- const Ch* end_; //!< Past-the-end position.
- bool valid_; //!< Whether the parsing is valid.
- };
-
-    //! A helper stream to encode a character (UTF-8 code unit) into a percent-encoded sequence.
- template <typename OutputStream>
- class PercentEncodeStream {
- public:
- PercentEncodeStream(OutputStream& os) : os_(os) {}
- void Put(char c) { // UTF-8 must be byte
- unsigned char u = static_cast<unsigned char>(c);
- static const char hexDigits[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' };
- os_.Put('%');
- os_.Put(static_cast<typename OutputStream::Ch>(hexDigits[u >> 4]));
- os_.Put(static_cast<typename OutputStream::Ch>(hexDigits[u & 15]));
- }
- private:
- OutputStream& os_;
- };
-
- Allocator* allocator_; //!< The current allocator. It is either user-supplied or equal to ownAllocator_.
- Allocator* ownAllocator_; //!< Allocator owned by this Pointer.
- Ch* nameBuffer_; //!< A buffer containing all names in tokens.
- Token* tokens_; //!< A list of tokens.
- size_t tokenCount_; //!< Number of tokens in tokens_.
- size_t parseErrorOffset_; //!< Offset in code unit when parsing fail.
- PointerParseErrorCode parseErrorCode_; //!< Parsing error code.
-};
-
-//! GenericPointer for Value (UTF-8, default allocator).
-typedef GenericPointer<Value> Pointer;
-
-//!@name Helper functions for GenericPointer
-//@{
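-
-// A usage sketch (illustrative only; assumes a \c Document named \c d):
-//   SetValueByPointer(d, "/hello", "world");         // d["hello"] = "world"
-//   const Value* v = GetValueByPointer(d, "/hello"); // lookup; null if unresolved
-//   EraseValueByPointer(d, "/hello");                // returns whether the value was erased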
-
-//////////////////////////////////////////////////////////////////////////////
-
-template <typename T>
-typename T::ValueType& CreateValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, typename T::AllocatorType& a) {
- return pointer.Create(root, a);
-}
-
-template <typename T, typename CharType, size_t N>
-typename T::ValueType& CreateValueByPointer(T& root, const CharType(&source)[N], typename T::AllocatorType& a) {
- return GenericPointer<typename T::ValueType>(source, N - 1).Create(root, a);
-}
-
-// No allocator parameter
-
-template <typename DocumentType>
-typename DocumentType::ValueType& CreateValueByPointer(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer) {
- return pointer.Create(document);
-}
-
-template <typename DocumentType, typename CharType, size_t N>
-typename DocumentType::ValueType& CreateValueByPointer(DocumentType& document, const CharType(&source)[N]) {
- return GenericPointer<typename DocumentType::ValueType>(source, N - 1).Create(document);
-}
-
-//////////////////////////////////////////////////////////////////////////////
-
-template <typename T>
-typename T::ValueType* GetValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, size_t* unresolvedTokenIndex = 0) {
- return pointer.Get(root, unresolvedTokenIndex);
-}
-
-template <typename T>
-const typename T::ValueType* GetValueByPointer(const T& root, const GenericPointer<typename T::ValueType>& pointer, size_t* unresolvedTokenIndex = 0) {
- return pointer.Get(root, unresolvedTokenIndex);
-}
-
-template <typename T, typename CharType, size_t N>
-typename T::ValueType* GetValueByPointer(T& root, const CharType (&source)[N], size_t* unresolvedTokenIndex = 0) {
- return GenericPointer<typename T::ValueType>(source, N - 1).Get(root, unresolvedTokenIndex);
-}
-
-template <typename T, typename CharType, size_t N>
-const typename T::ValueType* GetValueByPointer(const T& root, const CharType(&source)[N], size_t* unresolvedTokenIndex = 0) {
- return GenericPointer<typename T::ValueType>(source, N - 1).Get(root, unresolvedTokenIndex);
-}
-
-//////////////////////////////////////////////////////////////////////////////
-
-template <typename T>
-typename T::ValueType& GetValueByPointerWithDefault(T& root, const GenericPointer<typename T::ValueType>& pointer, const typename T::ValueType& defaultValue, typename T::AllocatorType& a) {
- return pointer.GetWithDefault(root, defaultValue, a);
-}
-
-template <typename T>
-typename T::ValueType& GetValueByPointerWithDefault(T& root, const GenericPointer<typename T::ValueType>& pointer, const typename T::Ch* defaultValue, typename T::AllocatorType& a) {
- return pointer.GetWithDefault(root, defaultValue, a);
-}
-
-#if RAPIDJSON_HAS_STDSTRING
-template <typename T>
-typename T::ValueType& GetValueByPointerWithDefault(T& root, const GenericPointer<typename T::ValueType>& pointer, const std::basic_string<typename T::Ch>& defaultValue, typename T::AllocatorType& a) {
- return pointer.GetWithDefault(root, defaultValue, a);
-}
-#endif
-
-template <typename T, typename T2>
-RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename T::ValueType&))
-GetValueByPointerWithDefault(T& root, const GenericPointer<typename T::ValueType>& pointer, T2 defaultValue, typename T::AllocatorType& a) {
- return pointer.GetWithDefault(root, defaultValue, a);
-}
-
-template <typename T, typename CharType, size_t N>
-typename T::ValueType& GetValueByPointerWithDefault(T& root, const CharType(&source)[N], const typename T::ValueType& defaultValue, typename T::AllocatorType& a) {
- return GenericPointer<typename T::ValueType>(source, N - 1).GetWithDefault(root, defaultValue, a);
-}
-
-template <typename T, typename CharType, size_t N>
-typename T::ValueType& GetValueByPointerWithDefault(T& root, const CharType(&source)[N], const typename T::Ch* defaultValue, typename T::AllocatorType& a) {
- return GenericPointer<typename T::ValueType>(source, N - 1).GetWithDefault(root, defaultValue, a);
-}
-
-#if RAPIDJSON_HAS_STDSTRING
-template <typename T, typename CharType, size_t N>
-typename T::ValueType& GetValueByPointerWithDefault(T& root, const CharType(&source)[N], const std::basic_string<typename T::Ch>& defaultValue, typename T::AllocatorType& a) {
- return GenericPointer<typename T::ValueType>(source, N - 1).GetWithDefault(root, defaultValue, a);
-}
-#endif
-
-template <typename T, typename CharType, size_t N, typename T2>
-RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename T::ValueType&))
-GetValueByPointerWithDefault(T& root, const CharType(&source)[N], T2 defaultValue, typename T::AllocatorType& a) {
- return GenericPointer<typename T::ValueType>(source, N - 1).GetWithDefault(root, defaultValue, a);
-}
-
-// No allocator parameter
-
-template <typename DocumentType>
-typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, const typename DocumentType::ValueType& defaultValue) {
- return pointer.GetWithDefault(document, defaultValue);
-}
-
-template <typename DocumentType>
-typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, const typename DocumentType::Ch* defaultValue) {
- return pointer.GetWithDefault(document, defaultValue);
-}
-
-#if RAPIDJSON_HAS_STDSTRING
-template <typename DocumentType>
-typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, const std::basic_string<typename DocumentType::Ch>& defaultValue) {
- return pointer.GetWithDefault(document, defaultValue);
-}
-#endif
-
-template <typename DocumentType, typename T2>
-RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename DocumentType::ValueType&))
-GetValueByPointerWithDefault(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, T2 defaultValue) {
- return pointer.GetWithDefault(document, defaultValue);
-}
-
-template <typename DocumentType, typename CharType, size_t N>
-typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], const typename DocumentType::ValueType& defaultValue) {
- return GenericPointer<typename DocumentType::ValueType>(source, N - 1).GetWithDefault(document, defaultValue);
-}
-
-template <typename DocumentType, typename CharType, size_t N>
-typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], const typename DocumentType::Ch* defaultValue) {
- return GenericPointer<typename DocumentType::ValueType>(source, N - 1).GetWithDefault(document, defaultValue);
-}
-
-#if RAPIDJSON_HAS_STDSTRING
-template <typename DocumentType, typename CharType, size_t N>
-typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], const std::basic_string<typename DocumentType::Ch>& defaultValue) {
- return GenericPointer<typename DocumentType::ValueType>(source, N - 1).GetWithDefault(document, defaultValue);
-}
-#endif
-
-template <typename DocumentType, typename CharType, size_t N, typename T2>
-RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename DocumentType::ValueType&))
-GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], T2 defaultValue) {
- return GenericPointer<typename DocumentType::ValueType>(source, N - 1).GetWithDefault(document, defaultValue);
-}
-
-//////////////////////////////////////////////////////////////////////////////
-
-template <typename T>
-typename T::ValueType& SetValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, typename T::ValueType& value, typename T::AllocatorType& a) {
- return pointer.Set(root, value, a);
-}
-
-template <typename T>
-typename T::ValueType& SetValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, const typename T::ValueType& value, typename T::AllocatorType& a) {
- return pointer.Set(root, value, a);
-}
-
-template <typename T>
-typename T::ValueType& SetValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, const typename T::Ch* value, typename T::AllocatorType& a) {
- return pointer.Set(root, value, a);
-}
-
-#if RAPIDJSON_HAS_STDSTRING
-template <typename T>
-typename T::ValueType& SetValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, const std::basic_string<typename T::Ch>& value, typename T::AllocatorType& a) {
- return pointer.Set(root, value, a);
-}
-#endif
-
-template <typename T, typename T2>
-RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename T::ValueType&))
-SetValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, T2 value, typename T::AllocatorType& a) {
- return pointer.Set(root, value, a);
-}
-
-template <typename T, typename CharType, size_t N>
-typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], typename T::ValueType& value, typename T::AllocatorType& a) {
- return GenericPointer<typename T::ValueType>(source, N - 1).Set(root, value, a);
-}
-
-template <typename T, typename CharType, size_t N>
-typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], const typename T::ValueType& value, typename T::AllocatorType& a) {
- return GenericPointer<typename T::ValueType>(source, N - 1).Set(root, value, a);
-}
-
-template <typename T, typename CharType, size_t N>
-typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], const typename T::Ch* value, typename T::AllocatorType& a) {
- return GenericPointer<typename T::ValueType>(source, N - 1).Set(root, value, a);
-}
-
-#if RAPIDJSON_HAS_STDSTRING
-template <typename T, typename CharType, size_t N>
-typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], const std::basic_string<typename T::Ch>& value, typename T::AllocatorType& a) {
- return GenericPointer<typename T::ValueType>(source, N - 1).Set(root, value, a);
-}
-#endif
-
-template <typename T, typename CharType, size_t N, typename T2>
-RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename T::ValueType&))
-SetValueByPointer(T& root, const CharType(&source)[N], T2 value, typename T::AllocatorType& a) {
- return GenericPointer<typename T::ValueType>(source, N - 1).Set(root, value, a);
-}
-
-// No allocator parameter
-
-template <typename DocumentType>
-typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, typename DocumentType::ValueType& value) {
- return pointer.Set(document, value);
-}
-
-template <typename DocumentType>
-typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, const typename DocumentType::ValueType& value) {
- return pointer.Set(document, value);
-}
-
-template <typename DocumentType>
-typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, const typename DocumentType::Ch* value) {
- return pointer.Set(document, value);
-}
-
-#if RAPIDJSON_HAS_STDSTRING
-template <typename DocumentType>
-typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, const std::basic_string<typename DocumentType::Ch>& value) {
- return pointer.Set(document, value);
-}
-#endif
-
-template <typename DocumentType, typename T2>
-RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename DocumentType::ValueType&))
-SetValueByPointer(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, T2 value) {
- return pointer.Set(document, value);
-}
-
-template <typename DocumentType, typename CharType, size_t N>
-typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], typename DocumentType::ValueType& value) {
- return GenericPointer<typename DocumentType::ValueType>(source, N - 1).Set(document, value);
-}
-
-template <typename DocumentType, typename CharType, size_t N>
-typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], const typename DocumentType::ValueType& value) {
- return GenericPointer<typename DocumentType::ValueType>(source, N - 1).Set(document, value);
-}
-
-template <typename DocumentType, typename CharType, size_t N>
-typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], const typename DocumentType::Ch* value) {
- return GenericPointer<typename DocumentType::ValueType>(source, N - 1).Set(document, value);
-}
-
-#if RAPIDJSON_HAS_STDSTRING
-template <typename DocumentType, typename CharType, size_t N>
-typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], const std::basic_string<typename DocumentType::Ch>& value) {
- return GenericPointer<typename DocumentType::ValueType>(source, N - 1).Set(document, value);
-}
-#endif
-
-template <typename DocumentType, typename CharType, size_t N, typename T2>
-RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename DocumentType::ValueType&))
-SetValueByPointer(DocumentType& document, const CharType(&source)[N], T2 value) {
- return GenericPointer<typename DocumentType::ValueType>(source, N - 1).Set(document, value);
-}
-
-//////////////////////////////////////////////////////////////////////////////
-
-template <typename T>
-typename T::ValueType& SwapValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, typename T::ValueType& value, typename T::AllocatorType& a) {
- return pointer.Swap(root, value, a);
-}
-
-template <typename T, typename CharType, size_t N>
-typename T::ValueType& SwapValueByPointer(T& root, const CharType(&source)[N], typename T::ValueType& value, typename T::AllocatorType& a) {
- return GenericPointer<typename T::ValueType>(source, N - 1).Swap(root, value, a);
-}
-
-template <typename DocumentType>
-typename DocumentType::ValueType& SwapValueByPointer(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, typename DocumentType::ValueType& value) {
- return pointer.Swap(document, value);
-}
-
-template <typename DocumentType, typename CharType, size_t N>
-typename DocumentType::ValueType& SwapValueByPointer(DocumentType& document, const CharType(&source)[N], typename DocumentType::ValueType& value) {
- return GenericPointer<typename DocumentType::ValueType>(source, N - 1).Swap(document, value);
-}
-
-//////////////////////////////////////////////////////////////////////////////
-
-template <typename T>
-bool EraseValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer) {
- return pointer.Erase(root);
-}
-
-template <typename T, typename CharType, size_t N>
-bool EraseValueByPointer(T& root, const CharType(&source)[N]) {
- return GenericPointer<typename T::ValueType>(source, N - 1).Erase(root);
-}
-
-//@}
-
-RAPIDJSON_NAMESPACE_END
-
-#if defined(__clang__) || defined(_MSC_VER)
-RAPIDJSON_DIAG_POP
-#endif
-
-#endif // RAPIDJSON_POINTER_H_
diff --git a/src/native/external/rapidjson/prettywriter.h b/src/native/external/rapidjson/prettywriter.h
deleted file mode 100644
index 45afb6949deb..000000000000
--- a/src/native/external/rapidjson/prettywriter.h
+++ /dev/null
@@ -1,277 +0,0 @@
-// Tencent is pleased to support the open source community by making RapidJSON available.
-//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
-//
-// Licensed under the MIT License (the "License"); you may not use this file except
-// in compliance with the License. You may obtain a copy of the License at
-//
-// http://opensource.org/licenses/MIT
-//
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-#ifndef RAPIDJSON_PRETTYWRITER_H_
-#define RAPIDJSON_PRETTYWRITER_H_
-
-#include "writer.h"
-
-#ifdef __GNUC__
-RAPIDJSON_DIAG_PUSH
-RAPIDJSON_DIAG_OFF(effc++)
-#endif
-
-#if defined(__clang__)
-RAPIDJSON_DIAG_PUSH
-RAPIDJSON_DIAG_OFF(c++98-compat)
-#endif
-
-RAPIDJSON_NAMESPACE_BEGIN
-
-//! Combination of PrettyWriter format flags.
-/*! \see PrettyWriter::SetFormatOptions
- */
-enum PrettyFormatOptions {
- kFormatDefault = 0, //!< Default pretty formatting.
- kFormatSingleLineArray = 1 //!< Format arrays on a single line.
-};
-
-//! Writer with indentation and spacing.
-/*!
-    \tparam OutputStream Type of output stream.
- \tparam SourceEncoding Encoding of source string.
- \tparam TargetEncoding Encoding of output stream.
- \tparam StackAllocator Type of allocator for allocating memory of stack.
-*/
-template<typename OutputStream, typename SourceEncoding = UTF8<>, typename TargetEncoding = UTF8<>, typename StackAllocator = CrtAllocator, unsigned writeFlags = kWriteDefaultFlags>
-class PrettyWriter : public Writer<OutputStream, SourceEncoding, TargetEncoding, StackAllocator, writeFlags> {
-public:
- typedef Writer<OutputStream, SourceEncoding, TargetEncoding, StackAllocator, writeFlags> Base;
- typedef typename Base::Ch Ch;
-
- //! Constructor
- /*! \param os Output stream.
-        \param allocator User-supplied allocator. If it is null, a private one is created.
- \param levelDepth Initial capacity of stack.
- */
- explicit PrettyWriter(OutputStream& os, StackAllocator* allocator = 0, size_t levelDepth = Base::kDefaultLevelDepth) :
- Base(os, allocator, levelDepth), indentChar_(' '), indentCharCount_(4), formatOptions_(kFormatDefault) {}
-
-
-    explicit PrettyWriter(StackAllocator* allocator = 0, size_t levelDepth = Base::kDefaultLevelDepth) :
-        Base(allocator, levelDepth), indentChar_(' '), indentCharCount_(4), formatOptions_(kFormatDefault) {}
-
-#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
- PrettyWriter(PrettyWriter&& rhs) :
- Base(std::forward<PrettyWriter>(rhs)), indentChar_(rhs.indentChar_), indentCharCount_(rhs.indentCharCount_), formatOptions_(rhs.formatOptions_) {}
-#endif
-
- //! Set custom indentation.
-    /*! \param indentChar Character for indentation. Must be a whitespace character (' ', '\\t', '\\n', '\\r').
- \param indentCharCount Number of indent characters for each indentation level.
- \note The default indentation is 4 spaces.
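-
-        A usage sketch (illustrative only; assumes an existing \c writer):
-        \code
-        writer.SetIndent('\t', 1); // one tab per indentation level
-        \endcode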
- */
- PrettyWriter& SetIndent(Ch indentChar, unsigned indentCharCount) {
- RAPIDJSON_ASSERT(indentChar == ' ' || indentChar == '\t' || indentChar == '\n' || indentChar == '\r');
- indentChar_ = indentChar;
- indentCharCount_ = indentCharCount;
- return *this;
- }
-
- //! Set pretty writer formatting options.
- /*! \param options Formatting options.
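-
-        A usage sketch (illustrative only; assumes an existing \c writer):
-        \code
-        writer.SetFormatOptions(kFormatSingleLineArray); // e.g. [1, 2, 3] stays on one line
-        \endcode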
- */
- PrettyWriter& SetFormatOptions(PrettyFormatOptions options) {
- formatOptions_ = options;
- return *this;
- }
-
- /*! @name Implementation of Handler
- \see Handler
- */
- //@{
-
- bool Null() { PrettyPrefix(kNullType); return Base::EndValue(Base::WriteNull()); }
- bool Bool(bool b) { PrettyPrefix(b ? kTrueType : kFalseType); return Base::EndValue(Base::WriteBool(b)); }
- bool Int(int i) { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteInt(i)); }
- bool Uint(unsigned u) { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteUint(u)); }
- bool Int64(int64_t i64) { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteInt64(i64)); }
- bool Uint64(uint64_t u64) { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteUint64(u64)); }
- bool Double(double d) { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteDouble(d)); }
-
- bool RawNumber(const Ch* str, SizeType length, bool copy = false) {
- RAPIDJSON_ASSERT(str != 0);
- (void)copy;
- PrettyPrefix(kNumberType);
- return Base::EndValue(Base::WriteString(str, length));
- }
-
- bool String(const Ch* str, SizeType length, bool copy = false) {
- RAPIDJSON_ASSERT(str != 0);
- (void)copy;
- PrettyPrefix(kStringType);
- return Base::EndValue(Base::WriteString(str, length));
- }
-
-#if RAPIDJSON_HAS_STDSTRING
- bool String(const std::basic_string<Ch>& str) {
- return String(str.data(), SizeType(str.size()));
- }
-#endif
-
- bool StartObject() {
- PrettyPrefix(kObjectType);
- new (Base::level_stack_.template Push<typename Base::Level>()) typename Base::Level(false);
- return Base::WriteStartObject();
- }
-
- bool Key(const Ch* str, SizeType length, bool copy = false) { return String(str, length, copy); }
-
-#if RAPIDJSON_HAS_STDSTRING
- bool Key(const std::basic_string<Ch>& str) {
- return Key(str.data(), SizeType(str.size()));
- }
-#endif
-
- bool EndObject(SizeType memberCount = 0) {
- (void)memberCount;
- RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level)); // not inside an Object
- RAPIDJSON_ASSERT(!Base::level_stack_.template Top<typename Base::Level>()->inArray); // currently inside an Array, not Object
- RAPIDJSON_ASSERT(0 == Base::level_stack_.template Top<typename Base::Level>()->valueCount % 2); // Object has a Key without a Value
-
- bool empty = Base::level_stack_.template Pop<typename Base::Level>(1)->valueCount == 0;
-
- if (!empty) {
- Base::os_->Put('\n');
- WriteIndent();
- }
- bool ret = Base::EndValue(Base::WriteEndObject());
- (void)ret;
- RAPIDJSON_ASSERT(ret == true);
- if (Base::level_stack_.Empty()) // end of json text
- Base::Flush();
- return true;
- }
-
- bool StartArray() {
- PrettyPrefix(kArrayType);
- new (Base::level_stack_.template Push<typename Base::Level>()) typename Base::Level(true);
- return Base::WriteStartArray();
- }
-
- bool EndArray(SizeType memberCount = 0) {
- (void)memberCount;
- RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level));
- RAPIDJSON_ASSERT(Base::level_stack_.template Top<typename Base::Level>()->inArray);
- bool empty = Base::level_stack_.template Pop<typename Base::Level>(1)->valueCount == 0;
-
- if (!empty && !(formatOptions_ & kFormatSingleLineArray)) {
- Base::os_->Put('\n');
- WriteIndent();
- }
- bool ret = Base::EndValue(Base::WriteEndArray());
- (void)ret;
- RAPIDJSON_ASSERT(ret == true);
- if (Base::level_stack_.Empty()) // end of json text
- Base::Flush();
- return true;
- }
-
- //@}
-
- /*! @name Convenience extensions */
- //@{
-
- //! Simpler but slower overload.
- bool String(const Ch* str) { return String(str, internal::StrLen(str)); }
- bool Key(const Ch* str) { return Key(str, internal::StrLen(str)); }
-
- //@}
-
- //! Write a raw JSON value.
- /*!
-        Allows the user to write a stringified JSON as a value.
-
-        \param json A well-formed JSON value. It should not contain a null character within the [0, length - 1] range.
-        \param length Length of the json string.
-        \param type Type of the root of the json value.
-        \note When using PrettyWriter::RawValue(), the resulting JSON may not be indented correctly.
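-
-        A usage sketch (illustrative only; assumes an existing \c writer):
-        \code
-        writer.RawValue("{\"x\":1}", 7, kObjectType); // splice pre-serialized JSON
-        \endcode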
- */
- bool RawValue(const Ch* json, size_t length, Type type) {
- RAPIDJSON_ASSERT(json != 0);
- PrettyPrefix(type);
- return Base::EndValue(Base::WriteRawValue(json, length));
- }
-
-protected:
- void PrettyPrefix(Type type) {
- (void)type;
- if (Base::level_stack_.GetSize() != 0) { // this value is not at root
- typename Base::Level* level = Base::level_stack_.template Top<typename Base::Level>();
-
- if (level->inArray) {
- if (level->valueCount > 0) {
- Base::os_->Put(','); // add comma if it is not the first element in array
- if (formatOptions_ & kFormatSingleLineArray)
- Base::os_->Put(' ');
- }
-
- if (!(formatOptions_ & kFormatSingleLineArray)) {
- Base::os_->Put('\n');
- WriteIndent();
- }
- }
- else { // in object
- if (level->valueCount > 0) {
- if (level->valueCount % 2 == 0) {
- Base::os_->Put(',');
- Base::os_->Put('\n');
- }
- else {
- Base::os_->Put(':');
- Base::os_->Put(' ');
- }
- }
- else
- Base::os_->Put('\n');
-
- if (level->valueCount % 2 == 0)
- WriteIndent();
- }
- if (!level->inArray && level->valueCount % 2 == 0)
-                RAPIDJSON_ASSERT(type == kStringType);  // inside an object, an even-numbered value must be a key name
- level->valueCount++;
- }
- else {
-            RAPIDJSON_ASSERT(!Base::hasRoot_); // There should be one and only one root.
- Base::hasRoot_ = true;
- }
- }
-
- void WriteIndent() {
- size_t count = (Base::level_stack_.GetSize() / sizeof(typename Base::Level)) * indentCharCount_;
- PutN(*Base::os_, static_cast<typename OutputStream::Ch>(indentChar_), count);
- }
-
- Ch indentChar_;
- unsigned indentCharCount_;
- PrettyFormatOptions formatOptions_;
-
-private:
- // Prohibit copy constructor & assignment operator.
- PrettyWriter(const PrettyWriter&);
- PrettyWriter& operator=(const PrettyWriter&);
-};
-
-RAPIDJSON_NAMESPACE_END
-
-#if defined(__clang__)
-RAPIDJSON_DIAG_POP
-#endif
-
-#ifdef __GNUC__
-RAPIDJSON_DIAG_POP
-#endif
-
-#endif // RAPIDJSON_PRETTYWRITER_H_
diff --git a/src/native/external/rapidjson/rapidjson.h b/src/native/external/rapidjson/rapidjson.h
index 549936ffe06c..5ea69479501a 100644
--- a/src/native/external/rapidjson/rapidjson.h
+++ b/src/native/external/rapidjson/rapidjson.h
@@ -1,15 +1,15 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
-//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_RAPIDJSON_H_
@@ -17,7 +17,7 @@
/*!\file rapidjson.h
\brief common definitions and configuration
-
+
\see RAPIDJSON_CONFIG
*/
@@ -125,6 +125,19 @@
#endif
///////////////////////////////////////////////////////////////////////////////
+// __cplusplus macro
+
+//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
+
+#if defined(_MSC_VER)
+#define RAPIDJSON_CPLUSPLUS _MSVC_LANG
+#else
+#define RAPIDJSON_CPLUSPLUS __cplusplus
+#endif
+
+//!@endcond
+
+///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_HAS_STDSTRING
#ifndef RAPIDJSON_HAS_STDSTRING
@@ -150,6 +163,24 @@
#endif // RAPIDJSON_HAS_STDSTRING
///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_USE_MEMBERSMAP
+
+/*! \def RAPIDJSON_USE_MEMBERSMAP
+ \ingroup RAPIDJSON_CONFIG
+ \brief Enable RapidJSON support for object members handling in a \c std::multimap
+
+ By defining this preprocessor symbol to \c 1, \ref rapidjson::GenericValue object
+ members are stored in a \c std::multimap for faster lookup and deletion times, a
+  trade-off with a slightly slower insertion time and a small allocator memory
+  overhead.
+
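+  A usage sketch (illustrative only):
+  \code
+  #define RAPIDJSON_USE_MEMBERSMAP 1  // define before including any RapidJSON header
+  #include "rapidjson/document.h"
+  \endcode
+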
+ \hideinitializer
+*/
+#ifndef RAPIDJSON_USE_MEMBERSMAP
+#define RAPIDJSON_USE_MEMBERSMAP 0 // not by default
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_NO_INT64DEFINE
/*! \def RAPIDJSON_NO_INT64DEFINE
@@ -164,7 +195,7 @@
*/
#ifndef RAPIDJSON_NO_INT64DEFINE
//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
-#if defined(_MSC_VER) && (_MSC_VER < 1800) // Visual Studio 2013
+#if defined(_MSC_VER) && (_MSC_VER < 1800) // Visual Studio 2013
#include "msinttypes/stdint.h"
#include "msinttypes/inttypes.h"
#else
@@ -246,7 +277,7 @@
# elif defined(RAPIDJSON_DOXYGEN_RUNNING)
# define RAPIDJSON_ENDIAN
# else
-# error Unknown machine endianness detected. User needs to define RAPIDJSON_ENDIAN.
+# error Unknown machine endianness detected. User needs to define RAPIDJSON_ENDIAN.
# endif
#endif // RAPIDJSON_ENDIAN
@@ -411,7 +442,7 @@ RAPIDJSON_NAMESPACE_END
// Prefer C++11 static_assert, if available
#ifndef RAPIDJSON_STATIC_ASSERT
-#if __cplusplus >= 201103L || ( defined(_MSC_VER) && _MSC_VER >= 1800 )
+#if RAPIDJSON_CPLUSPLUS >= 201103L || ( defined(_MSC_VER) && _MSC_VER >= 1800 )
#define RAPIDJSON_STATIC_ASSERT(x) \
static_assert(x, RAPIDJSON_STRINGIFY(x))
#endif // C++11
@@ -482,7 +513,7 @@ RAPIDJSON_NAMESPACE_END
//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
-#define RAPIDJSON_MULTILINEMACRO_BEGIN do {
+#define RAPIDJSON_MULTILINEMACRO_BEGIN do {
#define RAPIDJSON_MULTILINEMACRO_END \
} while((void)0, 0)
@@ -490,6 +521,12 @@ RAPIDJSON_NAMESPACE_END
#define RAPIDJSON_VERSION_CODE(x,y,z) \
(((x)*100000) + ((y)*100) + (z))
+#if defined(__has_builtin)
+#define RAPIDJSON_HAS_BUILTIN(x) __has_builtin(x)
+#else
+#define RAPIDJSON_HAS_BUILTIN(x) 0
+#endif
+
///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_DIAG_PUSH/POP, RAPIDJSON_DIAG_OFF
@@ -535,8 +572,14 @@ RAPIDJSON_NAMESPACE_END
///////////////////////////////////////////////////////////////////////////////
// C++11 features
+#ifndef RAPIDJSON_HAS_CXX11
+#define RAPIDJSON_HAS_CXX11 (RAPIDJSON_CPLUSPLUS >= 201103L)
+#endif
+
#ifndef RAPIDJSON_HAS_CXX11_RVALUE_REFS
-#if defined(__clang__)
+#if RAPIDJSON_HAS_CXX11
+#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1
+#elif defined(__clang__)
#if __has_feature(cxx_rvalue_references) && \
(defined(_MSC_VER) || defined(_LIBCPP_VERSION) || defined(__GLIBCXX__) && __GLIBCXX__ >= 20080306)
#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1
@@ -553,8 +596,14 @@ RAPIDJSON_NAMESPACE_END
#endif
#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+#include <utility> // std::move
+#endif
+
#ifndef RAPIDJSON_HAS_CXX11_NOEXCEPT
-#if defined(__clang__)
+#if RAPIDJSON_HAS_CXX11
+#define RAPIDJSON_HAS_CXX11_NOEXCEPT 1
+#elif defined(__clang__)
#define RAPIDJSON_HAS_CXX11_NOEXCEPT __has_feature(cxx_noexcept)
#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \
(defined(_MSC_VER) && _MSC_VER >= 1900) || \
@@ -564,11 +613,13 @@ RAPIDJSON_NAMESPACE_END
#define RAPIDJSON_HAS_CXX11_NOEXCEPT 0
#endif
#endif
+#ifndef RAPIDJSON_NOEXCEPT
#if RAPIDJSON_HAS_CXX11_NOEXCEPT
#define RAPIDJSON_NOEXCEPT noexcept
#else
-#define RAPIDJSON_NOEXCEPT /* noexcept */
+#define RAPIDJSON_NOEXCEPT throw()
#endif // RAPIDJSON_HAS_CXX11_NOEXCEPT
+#endif
// no automatic detection, yet
#ifndef RAPIDJSON_HAS_CXX11_TYPETRAITS
@@ -591,6 +642,27 @@ RAPIDJSON_NAMESPACE_END
#endif
#endif // RAPIDJSON_HAS_CXX11_RANGE_FOR
+///////////////////////////////////////////////////////////////////////////////
+// C++17 features
+
+#ifndef RAPIDJSON_HAS_CXX17
+#define RAPIDJSON_HAS_CXX17 (RAPIDJSON_CPLUSPLUS >= 201703L)
+#endif
+
+#if RAPIDJSON_HAS_CXX17
+# define RAPIDJSON_DELIBERATE_FALLTHROUGH [[fallthrough]]
+#elif defined(__has_cpp_attribute)
+# if __has_cpp_attribute(clang::fallthrough)
+# define RAPIDJSON_DELIBERATE_FALLTHROUGH [[clang::fallthrough]]
+# elif __has_cpp_attribute(fallthrough)
+# define RAPIDJSON_DELIBERATE_FALLTHROUGH __attribute__((fallthrough))
+# else
+# define RAPIDJSON_DELIBERATE_FALLTHROUGH
+# endif
+#else
+# define RAPIDJSON_DELIBERATE_FALLTHROUGH
+#endif
+
//!@endcond
//! Assertion (in non-throwing contexts).
@@ -609,17 +681,30 @@ RAPIDJSON_NAMESPACE_END
#ifndef RAPIDJSON_NOEXCEPT_ASSERT
#ifdef RAPIDJSON_ASSERT_THROWS
-#if RAPIDJSON_HAS_CXX11_NOEXCEPT
-#define RAPIDJSON_NOEXCEPT_ASSERT(x)
-#else
-#define RAPIDJSON_NOEXCEPT_ASSERT(x) RAPIDJSON_ASSERT(x)
-#endif // RAPIDJSON_HAS_CXX11_NOEXCEPT
+#include <cassert>
+#define RAPIDJSON_NOEXCEPT_ASSERT(x) assert(x)
#else
#define RAPIDJSON_NOEXCEPT_ASSERT(x) RAPIDJSON_ASSERT(x)
#endif // RAPIDJSON_ASSERT_THROWS
#endif // RAPIDJSON_NOEXCEPT_ASSERT
///////////////////////////////////////////////////////////////////////////////
+// malloc/realloc/free
+
+#ifndef RAPIDJSON_MALLOC
+///! customization point for global \c malloc
+#define RAPIDJSON_MALLOC(size) std::malloc(size)
+#endif
+#ifndef RAPIDJSON_REALLOC
+///! customization point for global \c realloc
+#define RAPIDJSON_REALLOC(ptr, new_size) std::realloc(ptr, new_size)
+#endif
+#ifndef RAPIDJSON_FREE
+///! customization point for global \c free
+#define RAPIDJSON_FREE(ptr) std::free(ptr)
+#endif
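+
+// A usage sketch (illustrative only): the three hooks are typically overridden together,
+// e.g. with hypothetical arena functions declared before including this header:
+//   #define RAPIDJSON_MALLOC(size) ArenaAlloc(size)
+//   #define RAPIDJSON_REALLOC(ptr, new_size) ArenaRealloc(ptr, new_size)
+//   #define RAPIDJSON_FREE(ptr) ArenaFree(ptr)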
+
+///////////////////////////////////////////////////////////////////////////////
// new/delete
#ifndef RAPIDJSON_NEW
@@ -646,7 +731,7 @@ enum Type {
kFalseType = 1, //!< false
kTrueType = 2, //!< true
kObjectType = 3, //!< object
- kArrayType = 4, //!< array
+ kArrayType = 4, //!< array
kStringType = 5, //!< string
kNumberType = 6 //!< number
};
diff --git a/src/native/external/rapidjson/reader.h b/src/native/external/rapidjson/reader.h
index 44a6bcd30cf2..55546601e29b 100644
--- a/src/native/external/rapidjson/reader.h
+++ b/src/native/external/rapidjson/reader.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
@@ -20,6 +20,7 @@
#include "allocators.h"
#include "stream.h"
#include "encodedstream.h"
+#include "internal/clzll.h"
#include "internal/meta.h"
#include "internal/stack.h"
#include "internal/strtod.h"
@@ -153,6 +154,7 @@ enum ParseFlag {
kParseNumbersAsStringsFlag = 64, //!< Parse all numbers (ints/doubles) as strings.
kParseTrailingCommasFlag = 128, //!< Allow trailing commas at the end of objects and arrays.
kParseNanAndInfFlag = 256, //!< Allow parsing NaN, Inf, Infinity, -Inf and -Infinity as doubles.
+ kParseEscapedApostropheFlag = 512, //!< Allow escaped apostrophe in strings.
kParseDefaultFlags = RAPIDJSON_PARSE_DEFAULT_FLAGS //!< Default parse flags. Can be customized by defining RAPIDJSON_PARSE_DEFAULT_FLAGS
};
@@ -443,16 +445,16 @@ inline const char *SkipWhitespace_SIMD(const char* p) {
x = vmvnq_u8(x); // Negate
x = vrev64q_u8(x); // Rev in 64
- uint64_t low = vgetq_lane_u64(reinterpret_cast<uint64x2_t>(x), 0); // extract
- uint64_t high = vgetq_lane_u64(reinterpret_cast<uint64x2_t>(x), 1); // extract
+ uint64_t low = vgetq_lane_u64(vreinterpretq_u64_u8(x), 0); // extract
+ uint64_t high = vgetq_lane_u64(vreinterpretq_u64_u8(x), 1); // extract
if (low == 0) {
if (high != 0) {
- int lz =__builtin_clzll(high);;
+ uint32_t lz = internal::clzll(high);
return p + 8 + (lz >> 3);
}
} else {
- int lz = __builtin_clzll(low);;
+ uint32_t lz = internal::clzll(low);
return p + (lz >> 3);
}
}
@@ -479,16 +481,16 @@ inline const char *SkipWhitespace_SIMD(const char* p, const char* end) {
x = vmvnq_u8(x); // Negate
x = vrev64q_u8(x); // Rev in 64
- uint64_t low = vgetq_lane_u64(reinterpret_cast<uint64x2_t>(x), 0); // extract
- uint64_t high = vgetq_lane_u64(reinterpret_cast<uint64x2_t>(x), 1); // extract
+ uint64_t low = vgetq_lane_u64(vreinterpretq_u64_u8(x), 0); // extract
+ uint64_t high = vgetq_lane_u64(vreinterpretq_u64_u8(x), 1); // extract
if (low == 0) {
if (high != 0) {
- int lz = __builtin_clzll(high);
+ uint32_t lz = internal::clzll(high);
return p + 8 + (lz >> 3);
}
} else {
- int lz = __builtin_clzll(low);
+ uint32_t lz = internal::clzll(low);
return p + (lz >> 3);
}
}
@@ -990,7 +992,7 @@ private:
//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
#define Z16 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
static const char escape[256] = {
- Z16, Z16, 0, 0,'\"', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,'/',
+ Z16, Z16, 0, 0,'\"', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, '/',
Z16, Z16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,'\\', 0, 0, 0,
0, 0,'\b', 0, 0, 0,'\f', 0, 0, 0, 0, 0, 0, 0,'\n', 0,
0, 0,'\r', 0,'\t', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1013,19 +1015,31 @@ private:
is.Take();
os.Put(static_cast<typename TEncoding::Ch>(escape[static_cast<unsigned char>(e)]));
}
+ else if ((parseFlags & kParseEscapedApostropheFlag) && RAPIDJSON_LIKELY(e == '\'')) { // Allow escaped apostrophe
+ is.Take();
+ os.Put('\'');
+ }
else if (RAPIDJSON_LIKELY(e == 'u')) { // Unicode
is.Take();
unsigned codepoint = ParseHex4(is, escapeOffset);
RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID;
- if (RAPIDJSON_UNLIKELY(codepoint >= 0xD800 && codepoint <= 0xDBFF)) {
- // Handle UTF-16 surrogate pair
- if (RAPIDJSON_UNLIKELY(!Consume(is, '\\') || !Consume(is, 'u')))
- RAPIDJSON_PARSE_ERROR(kParseErrorStringUnicodeSurrogateInvalid, escapeOffset);
- unsigned codepoint2 = ParseHex4(is, escapeOffset);
- RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID;
- if (RAPIDJSON_UNLIKELY(codepoint2 < 0xDC00 || codepoint2 > 0xDFFF))
+ if (RAPIDJSON_UNLIKELY(codepoint >= 0xD800 && codepoint <= 0xDFFF)) {
+ // high surrogate, check if followed by valid low surrogate
+ if (RAPIDJSON_LIKELY(codepoint <= 0xDBFF)) {
+ // Handle UTF-16 surrogate pair
+ if (RAPIDJSON_UNLIKELY(!Consume(is, '\\') || !Consume(is, 'u')))
+ RAPIDJSON_PARSE_ERROR(kParseErrorStringUnicodeSurrogateInvalid, escapeOffset);
+ unsigned codepoint2 = ParseHex4(is, escapeOffset);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID;
+ if (RAPIDJSON_UNLIKELY(codepoint2 < 0xDC00 || codepoint2 > 0xDFFF))
+ RAPIDJSON_PARSE_ERROR(kParseErrorStringUnicodeSurrogateInvalid, escapeOffset);
+ codepoint = (((codepoint - 0xD800) << 10) | (codepoint2 - 0xDC00)) + 0x10000;
+ }
+ // single low surrogate
+ else
+ {
RAPIDJSON_PARSE_ERROR(kParseErrorStringUnicodeSurrogateInvalid, escapeOffset);
- codepoint = (((codepoint - 0xD800) << 10) | (codepoint2 - 0xDC00)) + 0x10000;
+ }
}
TEncoding::Encode(os, codepoint);
}
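Editorial aside, not part of the diff: the restructuring above widens the initial range check to 0xD800-0xDFFF, so a lone low surrogate is now rejected instead of being encoded verbatim. The pair arithmetic itself is the standard UTF-16 decoding rule; for example, \uD83D followed by \uDE00 yields U+1F600:

    unsigned hi = 0xD83D, lo = 0xDE00;
    unsigned cp = (((hi - 0xD800) << 10) | (lo - 0xDC00)) + 0x10000; // 0x1F600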
@@ -1244,19 +1258,19 @@ private:
x = vorrq_u8(x, vcltq_u8(s, s3));
x = vrev64q_u8(x); // Rev in 64
- uint64_t low = vgetq_lane_u64(reinterpret_cast<uint64x2_t>(x), 0); // extract
- uint64_t high = vgetq_lane_u64(reinterpret_cast<uint64x2_t>(x), 1); // extract
+ uint64_t low = vgetq_lane_u64(vreinterpretq_u64_u8(x), 0); // extract
+ uint64_t high = vgetq_lane_u64(vreinterpretq_u64_u8(x), 1); // extract
SizeType length = 0;
bool escaped = false;
if (low == 0) {
if (high != 0) {
- unsigned lz = (unsigned)__builtin_clzll(high);;
+ uint32_t lz = internal::clzll(high);
length = 8 + (lz >> 3);
escaped = true;
}
} else {
- unsigned lz = (unsigned)__builtin_clzll(low);;
+ uint32_t lz = internal::clzll(low);
length = lz >> 3;
escaped = true;
}
@@ -1314,19 +1328,19 @@ private:
x = vorrq_u8(x, vcltq_u8(s, s3));
x = vrev64q_u8(x); // Rev in 64
- uint64_t low = vgetq_lane_u64(reinterpret_cast<uint64x2_t>(x), 0); // extract
- uint64_t high = vgetq_lane_u64(reinterpret_cast<uint64x2_t>(x), 1); // extract
+ uint64_t low = vgetq_lane_u64(vreinterpretq_u64_u8(x), 0); // extract
+ uint64_t high = vgetq_lane_u64(vreinterpretq_u64_u8(x), 1); // extract
SizeType length = 0;
bool escaped = false;
if (low == 0) {
if (high != 0) {
- unsigned lz = (unsigned)__builtin_clzll(high);
+ uint32_t lz = internal::clzll(high);
length = 8 + (lz >> 3);
escaped = true;
}
} else {
- unsigned lz = (unsigned)__builtin_clzll(low);
+ uint32_t lz = internal::clzll(low);
length = lz >> 3;
escaped = true;
}
@@ -1370,17 +1384,17 @@ private:
x = vorrq_u8(x, vcltq_u8(s, s3));
x = vrev64q_u8(x); // Rev in 64
- uint64_t low = vgetq_lane_u64(reinterpret_cast<uint64x2_t>(x), 0); // extract
- uint64_t high = vgetq_lane_u64(reinterpret_cast<uint64x2_t>(x), 1); // extract
+ uint64_t low = vgetq_lane_u64(vreinterpretq_u64_u8(x), 0); // extract
+ uint64_t high = vgetq_lane_u64(vreinterpretq_u64_u8(x), 1); // extract
if (low == 0) {
if (high != 0) {
- int lz = __builtin_clzll(high);
+ uint32_t lz = internal::clzll(high);
p += 8 + (lz >> 3);
break;
}
} else {
- int lz = __builtin_clzll(low);
+ uint32_t lz = internal::clzll(low);
p += lz >> 3;
break;
}
@@ -1390,11 +1404,11 @@ private:
}
#endif // RAPIDJSON_NEON
- template<typename InputStream, bool backup, bool pushOnTake>
+ template<typename InputStream, typename StackCharacter, bool backup, bool pushOnTake>
class NumberStream;
- template<typename InputStream>
- class NumberStream<InputStream, false, false> {
+ template<typename InputStream, typename StackCharacter>
+ class NumberStream<InputStream, StackCharacter, false, false> {
public:
typedef typename InputStream::Ch Ch;
@@ -1403,11 +1417,11 @@ private:
RAPIDJSON_FORCEINLINE Ch Peek() const { return is.Peek(); }
RAPIDJSON_FORCEINLINE Ch TakePush() { return is.Take(); }
RAPIDJSON_FORCEINLINE Ch Take() { return is.Take(); }
- RAPIDJSON_FORCEINLINE void Push(char) {}
+ RAPIDJSON_FORCEINLINE void Push(char) {}
size_t Tell() { return is.Tell(); }
size_t Length() { return 0; }
- const char* Pop() { return 0; }
+ const StackCharacter* Pop() { return 0; }
protected:
NumberStream& operator=(const NumberStream&);
@@ -1415,45 +1429,47 @@ private:
InputStream& is;
};
- template<typename InputStream>
- class NumberStream<InputStream, true, false> : public NumberStream<InputStream, false, false> {
- typedef NumberStream<InputStream, false, false> Base;
+ template<typename InputStream, typename StackCharacter>
+ class NumberStream<InputStream, StackCharacter, true, false> : public NumberStream<InputStream, StackCharacter, false, false> {
+ typedef NumberStream<InputStream, StackCharacter, false, false> Base;
public:
- NumberStream(GenericReader& reader, InputStream& is) : Base(reader, is), stackStream(reader.stack_) {}
+ NumberStream(GenericReader& reader, InputStream& s) : Base(reader, s), stackStream(reader.stack_) {}
RAPIDJSON_FORCEINLINE Ch TakePush() {
- stackStream.Put(static_cast<char>(Base::is.Peek()));
+ stackStream.Put(static_cast<StackCharacter>(Base::is.Peek()));
return Base::is.Take();
}
- RAPIDJSON_FORCEINLINE void Push(char c) {
+ RAPIDJSON_FORCEINLINE void Push(StackCharacter c) {
stackStream.Put(c);
}
size_t Length() { return stackStream.Length(); }
- const char* Pop() {
+ const StackCharacter* Pop() {
stackStream.Put('\0');
return stackStream.Pop();
}
private:
- StackStream<char> stackStream;
+ StackStream<StackCharacter> stackStream;
};
- template<typename InputStream>
- class NumberStream<InputStream, true, true> : public NumberStream<InputStream, true, false> {
- typedef NumberStream<InputStream, true, false> Base;
+ template<typename InputStream, typename StackCharacter>
+ class NumberStream<InputStream, StackCharacter, true, true> : public NumberStream<InputStream, StackCharacter, true, false> {
+ typedef NumberStream<InputStream, StackCharacter, true, false> Base;
public:
- NumberStream(GenericReader& reader, InputStream& is) : Base(reader, is) {}
+ NumberStream(GenericReader& reader, InputStream& s) : Base(reader, s) {}
RAPIDJSON_FORCEINLINE Ch Take() { return Base::TakePush(); }
};
template<unsigned parseFlags, typename InputStream, typename Handler>
void ParseNumber(InputStream& is, Handler& handler) {
+ typedef typename internal::SelectIf<internal::BoolType<(parseFlags & kParseNumbersAsStringsFlag) != 0>, typename TargetEncoding::Ch, char>::Type NumberCharacter;
+
internal::StreamLocalCopy<InputStream> copy(is);
- NumberStream<InputStream,
+ NumberStream<InputStream, NumberCharacter,
((parseFlags & kParseNumbersAsStringsFlag) != 0) ?
((parseFlags & kParseInsituFlag) == 0) :
((parseFlags & kParseFullPrecisionFlag) != 0),
@@ -1678,10 +1694,10 @@ private:
}
else {
SizeType numCharsToCopy = static_cast<SizeType>(s.Length());
- StringStream srcStream(s.Pop());
+ GenericStringStream<UTF8<NumberCharacter> > srcStream(s.Pop());
StackStream<typename TargetEncoding::Ch> dstStream(stack_);
while (numCharsToCopy--) {
- Transcoder<UTF8<>, TargetEncoding>::Transcode(srcStream, dstStream);
+ Transcoder<UTF8<typename TargetEncoding::Ch>, TargetEncoding>::Transcode(srcStream, dstStream);
}
dstStream.Put('\0');
const typename TargetEncoding::Ch* str = dstStream.Pop();
@@ -1691,7 +1707,7 @@ private:
}
else {
size_t length = s.Length();
- const char* decimal = s.Pop(); // Pop stack no matter if it will be used or not.
+ const NumberCharacter* decimal = s.Pop(); // Pop stack no matter if it will be used or not.
if (useDouble) {
int p = exp + expFrac;
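Editorial aside, not part of the diff: internal::SelectIf in the NumberCharacter typedef above is RapidJSON's compile-time conditional. When kParseNumbersAsStringsFlag is set, the digits staged on the stack must already use the target encoding's character type, which is why NumberStream now carries a StackCharacter parameter. An equivalent standard-library formulation (for illustration only; the header uses its own metafunctions):

    using NumberCharacter = typename std::conditional<
        (parseFlags & kParseNumbersAsStringsFlag) != 0,
        typename TargetEncoding::Ch,
        char>::type;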
diff --git a/src/native/external/rapidjson/schema.h b/src/native/external/rapidjson/schema.h
deleted file mode 100644
index 26ae94748063..000000000000
--- a/src/native/external/rapidjson/schema.h
+++ /dev/null
@@ -1,2497 +0,0 @@
-// Tencent is pleased to support the open source community by making RapidJSON available.
-//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
-//
-// Licensed under the MIT License (the "License"); you may not use this file except
-// in compliance with the License. You may obtain a copy of the License at
-//
-// http://opensource.org/licenses/MIT
-//
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-#ifndef RAPIDJSON_SCHEMA_H_
-#define RAPIDJSON_SCHEMA_H_
-
-#include "document.h"
-#include "pointer.h"
-#include "stringbuffer.h"
-#include <cmath> // abs, floor
-
-#if !defined(RAPIDJSON_SCHEMA_USE_INTERNALREGEX)
-#define RAPIDJSON_SCHEMA_USE_INTERNALREGEX 1
-#else
-#define RAPIDJSON_SCHEMA_USE_INTERNALREGEX 0
-#endif
-
-#if !RAPIDJSON_SCHEMA_USE_INTERNALREGEX && defined(RAPIDJSON_SCHEMA_USE_STDREGEX) && (__cplusplus >=201103L || (defined(_MSC_VER) && _MSC_VER >= 1800))
-#define RAPIDJSON_SCHEMA_USE_STDREGEX 1
-#else
-#define RAPIDJSON_SCHEMA_USE_STDREGEX 0
-#endif
-
-#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX
-#include "internal/regex.h"
-#elif RAPIDJSON_SCHEMA_USE_STDREGEX
-#include <regex>
-#endif
-
-#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX || RAPIDJSON_SCHEMA_USE_STDREGEX
-#define RAPIDJSON_SCHEMA_HAS_REGEX 1
-#else
-#define RAPIDJSON_SCHEMA_HAS_REGEX 0
-#endif
-
-#ifndef RAPIDJSON_SCHEMA_VERBOSE
-#define RAPIDJSON_SCHEMA_VERBOSE 0
-#endif
-
-#if RAPIDJSON_SCHEMA_VERBOSE
-#include "stringbuffer.h"
-#endif
-
-RAPIDJSON_DIAG_PUSH
-
-#if defined(__GNUC__)
-RAPIDJSON_DIAG_OFF(effc++)
-#endif
-
-#ifdef __clang__
-RAPIDJSON_DIAG_OFF(weak-vtables)
-RAPIDJSON_DIAG_OFF(exit-time-destructors)
-RAPIDJSON_DIAG_OFF(c++98-compat-pedantic)
-RAPIDJSON_DIAG_OFF(variadic-macros)
-#elif defined(_MSC_VER)
-RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
-#endif
-
-RAPIDJSON_NAMESPACE_BEGIN
-
-///////////////////////////////////////////////////////////////////////////////
-// Verbose Utilities
-
-#if RAPIDJSON_SCHEMA_VERBOSE
-
-namespace internal {
-
-inline void PrintInvalidKeyword(const char* keyword) {
- printf("Fail keyword: %s\n", keyword);
-}
-
-inline void PrintInvalidKeyword(const wchar_t* keyword) {
- wprintf(L"Fail keyword: %ls\n", keyword);
-}
-
-inline void PrintInvalidDocument(const char* document) {
- printf("Fail document: %s\n\n", document);
-}
-
-inline void PrintInvalidDocument(const wchar_t* document) {
- wprintf(L"Fail document: %ls\n\n", document);
-}
-
-inline void PrintValidatorPointers(unsigned depth, const char* s, const char* d) {
- printf("S: %*s%s\nD: %*s%s\n\n", depth * 4, " ", s, depth * 4, " ", d);
-}
-
-inline void PrintValidatorPointers(unsigned depth, const wchar_t* s, const wchar_t* d) {
- wprintf(L"S: %*ls%ls\nD: %*ls%ls\n\n", depth * 4, L" ", s, depth * 4, L" ", d);
-}
-
-} // namespace internal
-
-#endif // RAPIDJSON_SCHEMA_VERBOSE
-
-///////////////////////////////////////////////////////////////////////////////
-// RAPIDJSON_INVALID_KEYWORD_RETURN
-
-#if RAPIDJSON_SCHEMA_VERBOSE
-#define RAPIDJSON_INVALID_KEYWORD_VERBOSE(keyword) internal::PrintInvalidKeyword(keyword)
-#else
-#define RAPIDJSON_INVALID_KEYWORD_VERBOSE(keyword)
-#endif
-
-#define RAPIDJSON_INVALID_KEYWORD_RETURN(keyword)\
-RAPIDJSON_MULTILINEMACRO_BEGIN\
- context.invalidKeyword = keyword.GetString();\
- RAPIDJSON_INVALID_KEYWORD_VERBOSE(keyword.GetString());\
- return false;\
-RAPIDJSON_MULTILINEMACRO_END
-
-///////////////////////////////////////////////////////////////////////////////
-// Forward declarations
-
-template <typename ValueType, typename Allocator>
-class GenericSchemaDocument;
-
-namespace internal {
-
-template <typename SchemaDocumentType>
-class Schema;
-
-///////////////////////////////////////////////////////////////////////////////
-// ISchemaValidator
-
-class ISchemaValidator {
-public:
- virtual ~ISchemaValidator() {}
- virtual bool IsValid() const = 0;
-};
-
-///////////////////////////////////////////////////////////////////////////////
-// ISchemaStateFactory
-
-template <typename SchemaType>
-class ISchemaStateFactory {
-public:
- virtual ~ISchemaStateFactory() {}
- virtual ISchemaValidator* CreateSchemaValidator(const SchemaType&) = 0;
- virtual void DestroySchemaValidator(ISchemaValidator* validator) = 0;
- virtual void* CreateHasher() = 0;
- virtual uint64_t GetHashCode(void* hasher) = 0;
- virtual void DestroryHasher(void* hasher) = 0;
- virtual void* MallocState(size_t size) = 0;
- virtual void FreeState(void* p) = 0;
-};
-
-///////////////////////////////////////////////////////////////////////////////
-// IValidationErrorHandler
-
-template <typename SchemaType>
-class IValidationErrorHandler {
-public:
- typedef typename SchemaType::Ch Ch;
- typedef typename SchemaType::SValue SValue;
-
- virtual ~IValidationErrorHandler() {}
-
- virtual void NotMultipleOf(int64_t actual, const SValue& expected) = 0;
- virtual void NotMultipleOf(uint64_t actual, const SValue& expected) = 0;
- virtual void NotMultipleOf(double actual, const SValue& expected) = 0;
- virtual void AboveMaximum(int64_t actual, const SValue& expected, bool exclusive) = 0;
- virtual void AboveMaximum(uint64_t actual, const SValue& expected, bool exclusive) = 0;
- virtual void AboveMaximum(double actual, const SValue& expected, bool exclusive) = 0;
- virtual void BelowMinimum(int64_t actual, const SValue& expected, bool exclusive) = 0;
- virtual void BelowMinimum(uint64_t actual, const SValue& expected, bool exclusive) = 0;
- virtual void BelowMinimum(double actual, const SValue& expected, bool exclusive) = 0;
-
- virtual void TooLong(const Ch* str, SizeType length, SizeType expected) = 0;
- virtual void TooShort(const Ch* str, SizeType length, SizeType expected) = 0;
- virtual void DoesNotMatch(const Ch* str, SizeType length) = 0;
-
- virtual void DisallowedItem(SizeType index) = 0;
- virtual void TooFewItems(SizeType actualCount, SizeType expectedCount) = 0;
- virtual void TooManyItems(SizeType actualCount, SizeType expectedCount) = 0;
- virtual void DuplicateItems(SizeType index1, SizeType index2) = 0;
-
- virtual void TooManyProperties(SizeType actualCount, SizeType expectedCount) = 0;
- virtual void TooFewProperties(SizeType actualCount, SizeType expectedCount) = 0;
- virtual void StartMissingProperties() = 0;
- virtual void AddMissingProperty(const SValue& name) = 0;
- virtual bool EndMissingProperties() = 0;
- virtual void PropertyViolations(ISchemaValidator** subvalidators, SizeType count) = 0;
- virtual void DisallowedProperty(const Ch* name, SizeType length) = 0;
-
- virtual void StartDependencyErrors() = 0;
- virtual void StartMissingDependentProperties() = 0;
- virtual void AddMissingDependentProperty(const SValue& targetName) = 0;
- virtual void EndMissingDependentProperties(const SValue& sourceName) = 0;
- virtual void AddDependencySchemaError(const SValue& souceName, ISchemaValidator* subvalidator) = 0;
- virtual bool EndDependencyErrors() = 0;
-
- virtual void DisallowedValue() = 0;
- virtual void StartDisallowedType() = 0;
- virtual void AddExpectedType(const typename SchemaType::ValueType& expectedType) = 0;
- virtual void EndDisallowedType(const typename SchemaType::ValueType& actualType) = 0;
- virtual void NotAllOf(ISchemaValidator** subvalidators, SizeType count) = 0;
- virtual void NoneOf(ISchemaValidator** subvalidators, SizeType count) = 0;
- virtual void NotOneOf(ISchemaValidator** subvalidators, SizeType count) = 0;
- virtual void Disallowed() = 0;
-};
-
-
-///////////////////////////////////////////////////////////////////////////////
-// Hasher
-
-// For comparison of compound value
-template<typename Encoding, typename Allocator>
-class Hasher {
-public:
- typedef typename Encoding::Ch Ch;
-
- Hasher(Allocator* allocator = 0, size_t stackCapacity = kDefaultSize) : stack_(allocator, stackCapacity) {}
-
- bool Null() { return WriteType(kNullType); }
- bool Bool(bool b) { return WriteType(b ? kTrueType : kFalseType); }
- bool Int(int i) { Number n; n.u.i = i; n.d = static_cast<double>(i); return WriteNumber(n); }
- bool Uint(unsigned u) { Number n; n.u.u = u; n.d = static_cast<double>(u); return WriteNumber(n); }
- bool Int64(int64_t i) { Number n; n.u.i = i; n.d = static_cast<double>(i); return WriteNumber(n); }
- bool Uint64(uint64_t u) { Number n; n.u.u = u; n.d = static_cast<double>(u); return WriteNumber(n); }
- bool Double(double d) {
- Number n;
- if (d < 0) n.u.i = static_cast<int64_t>(d);
- else n.u.u = static_cast<uint64_t>(d);
- n.d = d;
- return WriteNumber(n);
- }
-
- bool RawNumber(const Ch* str, SizeType len, bool) {
- WriteBuffer(kNumberType, str, len * sizeof(Ch));
- return true;
- }
-
- bool String(const Ch* str, SizeType len, bool) {
- WriteBuffer(kStringType, str, len * sizeof(Ch));
- return true;
- }
-
- bool StartObject() { return true; }
- bool Key(const Ch* str, SizeType len, bool copy) { return String(str, len, copy); }
- bool EndObject(SizeType memberCount) {
- uint64_t h = Hash(0, kObjectType);
- uint64_t* kv = stack_.template Pop<uint64_t>(memberCount * 2);
- for (SizeType i = 0; i < memberCount; i++)
- h ^= Hash(kv[i * 2], kv[i * 2 + 1]); // Use xor to achieve member order insensitive
- *stack_.template Push<uint64_t>() = h;
- return true;
- }
-
- bool StartArray() { return true; }
- bool EndArray(SizeType elementCount) {
- uint64_t h = Hash(0, kArrayType);
- uint64_t* e = stack_.template Pop<uint64_t>(elementCount);
- for (SizeType i = 0; i < elementCount; i++)
- h = Hash(h, e[i]); // Use hash to achieve element order sensitive
- *stack_.template Push<uint64_t>() = h;
- return true;
- }
-
- bool IsValid() const { return stack_.GetSize() == sizeof(uint64_t); }
-
- uint64_t GetHashCode() const {
- RAPIDJSON_ASSERT(IsValid());
- return *stack_.template Top<uint64_t>();
- }
-
-private:
- static const size_t kDefaultSize = 256;
- struct Number {
- union U {
- uint64_t u;
- int64_t i;
- }u;
- double d;
- };
-
- bool WriteType(Type type) { return WriteBuffer(type, 0, 0); }
-
- bool WriteNumber(const Number& n) { return WriteBuffer(kNumberType, &n, sizeof(n)); }
-
- bool WriteBuffer(Type type, const void* data, size_t len) {
- // FNV-1a from http://isthe.com/chongo/tech/comp/fnv/
- uint64_t h = Hash(RAPIDJSON_UINT64_C2(0x84222325, 0xcbf29ce4), type);
- const unsigned char* d = static_cast<const unsigned char*>(data);
- for (size_t i = 0; i < len; i++)
- h = Hash(h, d[i]);
- *stack_.template Push<uint64_t>() = h;
- return true;
- }
-
- static uint64_t Hash(uint64_t h, uint64_t d) {
- static const uint64_t kPrime = RAPIDJSON_UINT64_C2(0x00000100, 0x000001b3);
- h ^= d;
- h *= kPrime;
- return h;
- }
-
- Stack<Allocator> stack_;
-};
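Editorial aside on the deleted Hasher (context only): EndObject xors the per-member hashes, so member order cannot change the result, while EndArray chains them through Hash(h, e[i]) and is therefore order sensitive. A self-contained check of the xor property, using the same FNV-1a step:

    #include <cstdint>
    static uint64_t Hash(uint64_t h, uint64_t d) {
        h ^= d;
        h *= 0x100000001b3ULL; // FNV prime, i.e. RAPIDJSON_UINT64_C2(0x00000100, 0x000001b3)
        return h;
    }
    int main() {
        uint64_t ab = Hash(0, 1) ^ Hash(0, 2); // members in one order
        uint64_t ba = Hash(0, 2) ^ Hash(0, 1); // same members, other order
        return ab == ba ? 0 : 1;               // xor commutes, so the hashes agree
    }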
-
-///////////////////////////////////////////////////////////////////////////////
-// SchemaValidationContext
-
-template <typename SchemaDocumentType>
-struct SchemaValidationContext {
- typedef Schema<SchemaDocumentType> SchemaType;
- typedef ISchemaStateFactory<SchemaType> SchemaValidatorFactoryType;
- typedef IValidationErrorHandler<SchemaType> ErrorHandlerType;
- typedef typename SchemaType::ValueType ValueType;
- typedef typename ValueType::Ch Ch;
-
- enum PatternValidatorType {
- kPatternValidatorOnly,
- kPatternValidatorWithProperty,
- kPatternValidatorWithAdditionalProperty
- };
-
- SchemaValidationContext(SchemaValidatorFactoryType& f, ErrorHandlerType& eh, const SchemaType* s) :
- factory(f),
- error_handler(eh),
- schema(s),
- valueSchema(),
- invalidKeyword(),
- hasher(),
- arrayElementHashCodes(),
- validators(),
- validatorCount(),
- patternPropertiesValidators(),
- patternPropertiesValidatorCount(),
- patternPropertiesSchemas(),
- patternPropertiesSchemaCount(),
- valuePatternValidatorType(kPatternValidatorOnly),
- propertyExist(),
- inArray(false),
- valueUniqueness(false),
- arrayUniqueness(false)
- {
- }
-
- ~SchemaValidationContext() {
- if (hasher)
- factory.DestroryHasher(hasher);
- if (validators) {
- for (SizeType i = 0; i < validatorCount; i++)
- factory.DestroySchemaValidator(validators[i]);
- factory.FreeState(validators);
- }
- if (patternPropertiesValidators) {
- for (SizeType i = 0; i < patternPropertiesValidatorCount; i++)
- factory.DestroySchemaValidator(patternPropertiesValidators[i]);
- factory.FreeState(patternPropertiesValidators);
- }
- if (patternPropertiesSchemas)
- factory.FreeState(patternPropertiesSchemas);
- if (propertyExist)
- factory.FreeState(propertyExist);
- }
-
- SchemaValidatorFactoryType& factory;
- ErrorHandlerType& error_handler;
- const SchemaType* schema;
- const SchemaType* valueSchema;
- const Ch* invalidKeyword;
- void* hasher; // Only validator access
- void* arrayElementHashCodes; // Only validator access this
- ISchemaValidator** validators;
- SizeType validatorCount;
- ISchemaValidator** patternPropertiesValidators;
- SizeType patternPropertiesValidatorCount;
- const SchemaType** patternPropertiesSchemas;
- SizeType patternPropertiesSchemaCount;
- PatternValidatorType valuePatternValidatorType;
- PatternValidatorType objectPatternValidatorType;
- SizeType arrayElementIndex;
- bool* propertyExist;
- bool inArray;
- bool valueUniqueness;
- bool arrayUniqueness;
-};
-
-///////////////////////////////////////////////////////////////////////////////
-// Schema
-
-template <typename SchemaDocumentType>
-class Schema {
-public:
- typedef typename SchemaDocumentType::ValueType ValueType;
- typedef typename SchemaDocumentType::AllocatorType AllocatorType;
- typedef typename SchemaDocumentType::PointerType PointerType;
- typedef typename ValueType::EncodingType EncodingType;
- typedef typename EncodingType::Ch Ch;
- typedef SchemaValidationContext<SchemaDocumentType> Context;
- typedef Schema<SchemaDocumentType> SchemaType;
- typedef GenericValue<EncodingType, AllocatorType> SValue;
- typedef IValidationErrorHandler<Schema> ErrorHandler;
- friend class GenericSchemaDocument<ValueType, AllocatorType>;
-
- Schema(SchemaDocumentType* schemaDocument, const PointerType& p, const ValueType& value, const ValueType& document, AllocatorType* allocator) :
- allocator_(allocator),
- uri_(schemaDocument->GetURI(), *allocator),
- pointer_(p, allocator),
- typeless_(schemaDocument->GetTypeless()),
- enum_(),
- enumCount_(),
- not_(),
- type_((1 << kTotalSchemaType) - 1), // typeless
- validatorCount_(),
- notValidatorIndex_(),
- properties_(),
- additionalPropertiesSchema_(),
- patternProperties_(),
- patternPropertyCount_(),
- propertyCount_(),
- minProperties_(),
- maxProperties_(SizeType(~0)),
- additionalProperties_(true),
- hasDependencies_(),
- hasRequired_(),
- hasSchemaDependencies_(),
- additionalItemsSchema_(),
- itemsList_(),
- itemsTuple_(),
- itemsTupleCount_(),
- minItems_(),
- maxItems_(SizeType(~0)),
- additionalItems_(true),
- uniqueItems_(false),
- pattern_(),
- minLength_(0),
- maxLength_(~SizeType(0)),
- exclusiveMinimum_(false),
- exclusiveMaximum_(false),
- defaultValueLength_(0)
- {
- typedef typename SchemaDocumentType::ValueType ValueType;
- typedef typename ValueType::ConstValueIterator ConstValueIterator;
- typedef typename ValueType::ConstMemberIterator ConstMemberIterator;
-
- if (!value.IsObject())
- return;
-
- if (const ValueType* v = GetMember(value, GetTypeString())) {
- type_ = 0;
- if (v->IsString())
- AddType(*v);
- else if (v->IsArray())
- for (ConstValueIterator itr = v->Begin(); itr != v->End(); ++itr)
- AddType(*itr);
- }
-
- if (const ValueType* v = GetMember(value, GetEnumString()))
- if (v->IsArray() && v->Size() > 0) {
- enum_ = static_cast<uint64_t*>(allocator_->Malloc(sizeof(uint64_t) * v->Size()));
- for (ConstValueIterator itr = v->Begin(); itr != v->End(); ++itr) {
- typedef Hasher<EncodingType, MemoryPoolAllocator<> > EnumHasherType;
- char buffer[256u + 24];
- MemoryPoolAllocator<> hasherAllocator(buffer, sizeof(buffer));
- EnumHasherType h(&hasherAllocator, 256);
- itr->Accept(h);
- enum_[enumCount_++] = h.GetHashCode();
- }
- }
-
- if (schemaDocument) {
- AssignIfExist(allOf_, *schemaDocument, p, value, GetAllOfString(), document);
- AssignIfExist(anyOf_, *schemaDocument, p, value, GetAnyOfString(), document);
- AssignIfExist(oneOf_, *schemaDocument, p, value, GetOneOfString(), document);
- }
-
- if (const ValueType* v = GetMember(value, GetNotString())) {
- schemaDocument->CreateSchema(&not_, p.Append(GetNotString(), allocator_), *v, document);
- notValidatorIndex_ = validatorCount_;
- validatorCount_++;
- }
-
- // Object
-
- const ValueType* properties = GetMember(value, GetPropertiesString());
- const ValueType* required = GetMember(value, GetRequiredString());
- const ValueType* dependencies = GetMember(value, GetDependenciesString());
- {
- // Gather properties from properties/required/dependencies
- SValue allProperties(kArrayType);
-
- if (properties && properties->IsObject())
- for (ConstMemberIterator itr = properties->MemberBegin(); itr != properties->MemberEnd(); ++itr)
- AddUniqueElement(allProperties, itr->name);
-
- if (required && required->IsArray())
- for (ConstValueIterator itr = required->Begin(); itr != required->End(); ++itr)
- if (itr->IsString())
- AddUniqueElement(allProperties, *itr);
-
- if (dependencies && dependencies->IsObject())
- for (ConstMemberIterator itr = dependencies->MemberBegin(); itr != dependencies->MemberEnd(); ++itr) {
- AddUniqueElement(allProperties, itr->name);
- if (itr->value.IsArray())
- for (ConstValueIterator i = itr->value.Begin(); i != itr->value.End(); ++i)
- if (i->IsString())
- AddUniqueElement(allProperties, *i);
- }
-
- if (allProperties.Size() > 0) {
- propertyCount_ = allProperties.Size();
- properties_ = static_cast<Property*>(allocator_->Malloc(sizeof(Property) * propertyCount_));
- for (SizeType i = 0; i < propertyCount_; i++) {
- new (&properties_[i]) Property();
- properties_[i].name = allProperties[i];
- properties_[i].schema = typeless_;
- }
- }
- }
-
- if (properties && properties->IsObject()) {
- PointerType q = p.Append(GetPropertiesString(), allocator_);
- for (ConstMemberIterator itr = properties->MemberBegin(); itr != properties->MemberEnd(); ++itr) {
- SizeType index;
- if (FindPropertyIndex(itr->name, &index))
- schemaDocument->CreateSchema(&properties_[index].schema, q.Append(itr->name, allocator_), itr->value, document);
- }
- }
-
- if (const ValueType* v = GetMember(value, GetPatternPropertiesString())) {
- PointerType q = p.Append(GetPatternPropertiesString(), allocator_);
- patternProperties_ = static_cast<PatternProperty*>(allocator_->Malloc(sizeof(PatternProperty) * v->MemberCount()));
- patternPropertyCount_ = 0;
-
- for (ConstMemberIterator itr = v->MemberBegin(); itr != v->MemberEnd(); ++itr) {
- new (&patternProperties_[patternPropertyCount_]) PatternProperty();
- patternProperties_[patternPropertyCount_].pattern = CreatePattern(itr->name);
- schemaDocument->CreateSchema(&patternProperties_[patternPropertyCount_].schema, q.Append(itr->name, allocator_), itr->value, document);
- patternPropertyCount_++;
- }
- }
-
- if (required && required->IsArray())
- for (ConstValueIterator itr = required->Begin(); itr != required->End(); ++itr)
- if (itr->IsString()) {
- SizeType index;
- if (FindPropertyIndex(*itr, &index)) {
- properties_[index].required = true;
- hasRequired_ = true;
- }
- }
-
- if (dependencies && dependencies->IsObject()) {
- PointerType q = p.Append(GetDependenciesString(), allocator_);
- hasDependencies_ = true;
- for (ConstMemberIterator itr = dependencies->MemberBegin(); itr != dependencies->MemberEnd(); ++itr) {
- SizeType sourceIndex;
- if (FindPropertyIndex(itr->name, &sourceIndex)) {
- if (itr->value.IsArray()) {
- properties_[sourceIndex].dependencies = static_cast<bool*>(allocator_->Malloc(sizeof(bool) * propertyCount_));
- std::memset(properties_[sourceIndex].dependencies, 0, sizeof(bool)* propertyCount_);
- for (ConstValueIterator targetItr = itr->value.Begin(); targetItr != itr->value.End(); ++targetItr) {
- SizeType targetIndex;
- if (FindPropertyIndex(*targetItr, &targetIndex))
- properties_[sourceIndex].dependencies[targetIndex] = true;
- }
- }
- else if (itr->value.IsObject()) {
- hasSchemaDependencies_ = true;
- schemaDocument->CreateSchema(&properties_[sourceIndex].dependenciesSchema, q.Append(itr->name, allocator_), itr->value, document);
- properties_[sourceIndex].dependenciesValidatorIndex = validatorCount_;
- validatorCount_++;
- }
- }
- }
- }
-
- if (const ValueType* v = GetMember(value, GetAdditionalPropertiesString())) {
- if (v->IsBool())
- additionalProperties_ = v->GetBool();
- else if (v->IsObject())
- schemaDocument->CreateSchema(&additionalPropertiesSchema_, p.Append(GetAdditionalPropertiesString(), allocator_), *v, document);
- }
-
- AssignIfExist(minProperties_, value, GetMinPropertiesString());
- AssignIfExist(maxProperties_, value, GetMaxPropertiesString());
-
- // Array
- if (const ValueType* v = GetMember(value, GetItemsString())) {
- PointerType q = p.Append(GetItemsString(), allocator_);
- if (v->IsObject()) // List validation
- schemaDocument->CreateSchema(&itemsList_, q, *v, document);
- else if (v->IsArray()) { // Tuple validation
- itemsTuple_ = static_cast<const Schema**>(allocator_->Malloc(sizeof(const Schema*) * v->Size()));
- SizeType index = 0;
- for (ConstValueIterator itr = v->Begin(); itr != v->End(); ++itr, index++)
- schemaDocument->CreateSchema(&itemsTuple_[itemsTupleCount_++], q.Append(index, allocator_), *itr, document);
- }
- }
-
- AssignIfExist(minItems_, value, GetMinItemsString());
- AssignIfExist(maxItems_, value, GetMaxItemsString());
-
- if (const ValueType* v = GetMember(value, GetAdditionalItemsString())) {
- if (v->IsBool())
- additionalItems_ = v->GetBool();
- else if (v->IsObject())
- schemaDocument->CreateSchema(&additionalItemsSchema_, p.Append(GetAdditionalItemsString(), allocator_), *v, document);
- }
-
- AssignIfExist(uniqueItems_, value, GetUniqueItemsString());
-
- // String
- AssignIfExist(minLength_, value, GetMinLengthString());
- AssignIfExist(maxLength_, value, GetMaxLengthString());
-
- if (const ValueType* v = GetMember(value, GetPatternString()))
- pattern_ = CreatePattern(*v);
-
- // Number
- if (const ValueType* v = GetMember(value, GetMinimumString()))
- if (v->IsNumber())
- minimum_.CopyFrom(*v, *allocator_);
-
- if (const ValueType* v = GetMember(value, GetMaximumString()))
- if (v->IsNumber())
- maximum_.CopyFrom(*v, *allocator_);
-
- AssignIfExist(exclusiveMinimum_, value, GetExclusiveMinimumString());
- AssignIfExist(exclusiveMaximum_, value, GetExclusiveMaximumString());
-
- if (const ValueType* v = GetMember(value, GetMultipleOfString()))
- if (v->IsNumber() && v->GetDouble() > 0.0)
- multipleOf_.CopyFrom(*v, *allocator_);
-
- // Default
- if (const ValueType* v = GetMember(value, GetDefaultValueString()))
- if (v->IsString())
- defaultValueLength_ = v->GetStringLength();
-
- }
-
- ~Schema() {
- AllocatorType::Free(enum_);
- if (properties_) {
- for (SizeType i = 0; i < propertyCount_; i++)
- properties_[i].~Property();
- AllocatorType::Free(properties_);
- }
- if (patternProperties_) {
- for (SizeType i = 0; i < patternPropertyCount_; i++)
- patternProperties_[i].~PatternProperty();
- AllocatorType::Free(patternProperties_);
- }
- AllocatorType::Free(itemsTuple_);
-#if RAPIDJSON_SCHEMA_HAS_REGEX
- if (pattern_) {
- pattern_->~RegexType();
- AllocatorType::Free(pattern_);
- }
-#endif
- }
-
- const SValue& GetURI() const {
- return uri_;
- }
-
- const PointerType& GetPointer() const {
- return pointer_;
- }
-
- bool BeginValue(Context& context) const {
- if (context.inArray) {
- if (uniqueItems_)
- context.valueUniqueness = true;
-
- if (itemsList_)
- context.valueSchema = itemsList_;
- else if (itemsTuple_) {
- if (context.arrayElementIndex < itemsTupleCount_)
- context.valueSchema = itemsTuple_[context.arrayElementIndex];
- else if (additionalItemsSchema_)
- context.valueSchema = additionalItemsSchema_;
- else if (additionalItems_)
- context.valueSchema = typeless_;
- else {
- context.error_handler.DisallowedItem(context.arrayElementIndex);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetItemsString());
- }
- }
- else
- context.valueSchema = typeless_;
-
- context.arrayElementIndex++;
- }
- return true;
- }
-
- RAPIDJSON_FORCEINLINE bool EndValue(Context& context) const {
- if (context.patternPropertiesValidatorCount > 0) {
- bool otherValid = false;
- SizeType count = context.patternPropertiesValidatorCount;
- if (context.objectPatternValidatorType != Context::kPatternValidatorOnly)
- otherValid = context.patternPropertiesValidators[--count]->IsValid();
-
- bool patternValid = true;
- for (SizeType i = 0; i < count; i++)
- if (!context.patternPropertiesValidators[i]->IsValid()) {
- patternValid = false;
- break;
- }
-
- if (context.objectPatternValidatorType == Context::kPatternValidatorOnly) {
- if (!patternValid) {
- context.error_handler.PropertyViolations(context.patternPropertiesValidators, count);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternPropertiesString());
- }
- }
- else if (context.objectPatternValidatorType == Context::kPatternValidatorWithProperty) {
- if (!patternValid || !otherValid) {
- context.error_handler.PropertyViolations(context.patternPropertiesValidators, count + 1);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternPropertiesString());
- }
- }
- else if (!patternValid && !otherValid) { // kPatternValidatorWithAdditionalProperty)
- context.error_handler.PropertyViolations(context.patternPropertiesValidators, count + 1);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternPropertiesString());
- }
- }
-
- if (enum_) {
- const uint64_t h = context.factory.GetHashCode(context.hasher);
- for (SizeType i = 0; i < enumCount_; i++)
- if (enum_[i] == h)
- goto foundEnum;
- context.error_handler.DisallowedValue();
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetEnumString());
- foundEnum:;
- }
-
- if (allOf_.schemas)
- for (SizeType i = allOf_.begin; i < allOf_.begin + allOf_.count; i++)
- if (!context.validators[i]->IsValid()) {
- context.error_handler.NotAllOf(&context.validators[allOf_.begin], allOf_.count);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetAllOfString());
- }
-
- if (anyOf_.schemas) {
- for (SizeType i = anyOf_.begin; i < anyOf_.begin + anyOf_.count; i++)
- if (context.validators[i]->IsValid())
- goto foundAny;
- context.error_handler.NoneOf(&context.validators[anyOf_.begin], anyOf_.count);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetAnyOfString());
- foundAny:;
- }
-
- if (oneOf_.schemas) {
- bool oneValid = false;
- for (SizeType i = oneOf_.begin; i < oneOf_.begin + oneOf_.count; i++)
- if (context.validators[i]->IsValid()) {
- if (oneValid) {
- context.error_handler.NotOneOf(&context.validators[oneOf_.begin], oneOf_.count);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetOneOfString());
- } else
- oneValid = true;
- }
- if (!oneValid) {
- context.error_handler.NotOneOf(&context.validators[oneOf_.begin], oneOf_.count);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetOneOfString());
- }
- }
-
- if (not_ && context.validators[notValidatorIndex_]->IsValid()) {
- context.error_handler.Disallowed();
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetNotString());
- }
-
- return true;
- }
-
- bool Null(Context& context) const {
- if (!(type_ & (1 << kNullSchemaType))) {
- DisallowedType(context, GetNullString());
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString());
- }
- return CreateParallelValidator(context);
- }
-
- bool Bool(Context& context, bool) const {
- if (!(type_ & (1 << kBooleanSchemaType))) {
- DisallowedType(context, GetBooleanString());
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString());
- }
- return CreateParallelValidator(context);
- }
-
- bool Int(Context& context, int i) const {
- if (!CheckInt(context, i))
- return false;
- return CreateParallelValidator(context);
- }
-
- bool Uint(Context& context, unsigned u) const {
- if (!CheckUint(context, u))
- return false;
- return CreateParallelValidator(context);
- }
-
- bool Int64(Context& context, int64_t i) const {
- if (!CheckInt(context, i))
- return false;
- return CreateParallelValidator(context);
- }
-
- bool Uint64(Context& context, uint64_t u) const {
- if (!CheckUint(context, u))
- return false;
- return CreateParallelValidator(context);
- }
-
- bool Double(Context& context, double d) const {
- if (!(type_ & (1 << kNumberSchemaType))) {
- DisallowedType(context, GetNumberString());
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString());
- }
-
- if (!minimum_.IsNull() && !CheckDoubleMinimum(context, d))
- return false;
-
- if (!maximum_.IsNull() && !CheckDoubleMaximum(context, d))
- return false;
-
- if (!multipleOf_.IsNull() && !CheckDoubleMultipleOf(context, d))
- return false;
-
- return CreateParallelValidator(context);
- }
-
- bool String(Context& context, const Ch* str, SizeType length, bool) const {
- if (!(type_ & (1 << kStringSchemaType))) {
- DisallowedType(context, GetStringString());
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString());
- }
-
- if (minLength_ != 0 || maxLength_ != SizeType(~0)) {
- SizeType count;
- if (internal::CountStringCodePoint<EncodingType>(str, length, &count)) {
- if (count < minLength_) {
- context.error_handler.TooShort(str, length, minLength_);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinLengthString());
- }
- if (count > maxLength_) {
- context.error_handler.TooLong(str, length, maxLength_);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaxLengthString());
- }
- }
- }
-
- if (pattern_ && !IsPatternMatch(pattern_, str, length)) {
- context.error_handler.DoesNotMatch(str, length);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternString());
- }
-
- return CreateParallelValidator(context);
- }
-
- bool StartObject(Context& context) const {
- if (!(type_ & (1 << kObjectSchemaType))) {
- DisallowedType(context, GetObjectString());
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString());
- }
-
- if (hasDependencies_ || hasRequired_) {
- context.propertyExist = static_cast<bool*>(context.factory.MallocState(sizeof(bool) * propertyCount_));
- std::memset(context.propertyExist, 0, sizeof(bool) * propertyCount_);
- }
-
- if (patternProperties_) { // pre-allocate schema array
- SizeType count = patternPropertyCount_ + 1; // extra for valuePatternValidatorType
- context.patternPropertiesSchemas = static_cast<const SchemaType**>(context.factory.MallocState(sizeof(const SchemaType*) * count));
- context.patternPropertiesSchemaCount = 0;
- std::memset(context.patternPropertiesSchemas, 0, sizeof(SchemaType*) * count);
- }
-
- return CreateParallelValidator(context);
- }
-
- bool Key(Context& context, const Ch* str, SizeType len, bool) const {
- if (patternProperties_) {
- context.patternPropertiesSchemaCount = 0;
- for (SizeType i = 0; i < patternPropertyCount_; i++)
- if (patternProperties_[i].pattern && IsPatternMatch(patternProperties_[i].pattern, str, len)) {
- context.patternPropertiesSchemas[context.patternPropertiesSchemaCount++] = patternProperties_[i].schema;
- context.valueSchema = typeless_;
- }
- }
-
- SizeType index;
- if (FindPropertyIndex(ValueType(str, len).Move(), &index)) {
- if (context.patternPropertiesSchemaCount > 0) {
- context.patternPropertiesSchemas[context.patternPropertiesSchemaCount++] = properties_[index].schema;
- context.valueSchema = typeless_;
- context.valuePatternValidatorType = Context::kPatternValidatorWithProperty;
- }
- else
- context.valueSchema = properties_[index].schema;
-
- if (context.propertyExist)
- context.propertyExist[index] = true;
-
- return true;
- }
-
- if (additionalPropertiesSchema_) {
- if (additionalPropertiesSchema_ && context.patternPropertiesSchemaCount > 0) {
- context.patternPropertiesSchemas[context.patternPropertiesSchemaCount++] = additionalPropertiesSchema_;
- context.valueSchema = typeless_;
- context.valuePatternValidatorType = Context::kPatternValidatorWithAdditionalProperty;
- }
- else
- context.valueSchema = additionalPropertiesSchema_;
- return true;
- }
- else if (additionalProperties_) {
- context.valueSchema = typeless_;
- return true;
- }
-
- if (context.patternPropertiesSchemaCount == 0) { // patternProperties are not additional properties
- context.error_handler.DisallowedProperty(str, len);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetAdditionalPropertiesString());
- }
-
- return true;
- }
-
- bool EndObject(Context& context, SizeType memberCount) const {
- if (hasRequired_) {
- context.error_handler.StartMissingProperties();
- for (SizeType index = 0; index < propertyCount_; index++)
- if (properties_[index].required && !context.propertyExist[index])
- if (properties_[index].schema->defaultValueLength_ == 0 )
- context.error_handler.AddMissingProperty(properties_[index].name);
- if (context.error_handler.EndMissingProperties())
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetRequiredString());
- }
-
- if (memberCount < minProperties_) {
- context.error_handler.TooFewProperties(memberCount, minProperties_);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinPropertiesString());
- }
-
- if (memberCount > maxProperties_) {
- context.error_handler.TooManyProperties(memberCount, maxProperties_);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaxPropertiesString());
- }
-
- if (hasDependencies_) {
- context.error_handler.StartDependencyErrors();
- for (SizeType sourceIndex = 0; sourceIndex < propertyCount_; sourceIndex++) {
- const Property& source = properties_[sourceIndex];
- if (context.propertyExist[sourceIndex]) {
- if (source.dependencies) {
- context.error_handler.StartMissingDependentProperties();
- for (SizeType targetIndex = 0; targetIndex < propertyCount_; targetIndex++)
- if (source.dependencies[targetIndex] && !context.propertyExist[targetIndex])
- context.error_handler.AddMissingDependentProperty(properties_[targetIndex].name);
- context.error_handler.EndMissingDependentProperties(source.name);
- }
- else if (source.dependenciesSchema) {
- ISchemaValidator* dependenciesValidator = context.validators[source.dependenciesValidatorIndex];
- if (!dependenciesValidator->IsValid())
- context.error_handler.AddDependencySchemaError(source.name, dependenciesValidator);
- }
- }
- }
- if (context.error_handler.EndDependencyErrors())
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetDependenciesString());
- }
-
- return true;
- }
-
- bool StartArray(Context& context) const {
- if (!(type_ & (1 << kArraySchemaType))) {
- DisallowedType(context, GetArrayString());
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString());
- }
-
- context.arrayElementIndex = 0;
- context.inArray = true;
-
- return CreateParallelValidator(context);
- }
-
- bool EndArray(Context& context, SizeType elementCount) const {
- context.inArray = false;
-
- if (elementCount < minItems_) {
- context.error_handler.TooFewItems(elementCount, minItems_);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinItemsString());
- }
-
- if (elementCount > maxItems_) {
- context.error_handler.TooManyItems(elementCount, maxItems_);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaxItemsString());
- }
-
- return true;
- }
-
- // Generate functions for string literal according to Ch
-#define RAPIDJSON_STRING_(name, ...) \
- static const ValueType& Get##name##String() {\
- static const Ch s[] = { __VA_ARGS__, '\0' };\
- static const ValueType v(s, static_cast<SizeType>(sizeof(s) / sizeof(Ch) - 1));\
- return v;\
- }
-
- RAPIDJSON_STRING_(Null, 'n', 'u', 'l', 'l')
- RAPIDJSON_STRING_(Boolean, 'b', 'o', 'o', 'l', 'e', 'a', 'n')
- RAPIDJSON_STRING_(Object, 'o', 'b', 'j', 'e', 'c', 't')
- RAPIDJSON_STRING_(Array, 'a', 'r', 'r', 'a', 'y')
- RAPIDJSON_STRING_(String, 's', 't', 'r', 'i', 'n', 'g')
- RAPIDJSON_STRING_(Number, 'n', 'u', 'm', 'b', 'e', 'r')
- RAPIDJSON_STRING_(Integer, 'i', 'n', 't', 'e', 'g', 'e', 'r')
- RAPIDJSON_STRING_(Type, 't', 'y', 'p', 'e')
- RAPIDJSON_STRING_(Enum, 'e', 'n', 'u', 'm')
- RAPIDJSON_STRING_(AllOf, 'a', 'l', 'l', 'O', 'f')
- RAPIDJSON_STRING_(AnyOf, 'a', 'n', 'y', 'O', 'f')
- RAPIDJSON_STRING_(OneOf, 'o', 'n', 'e', 'O', 'f')
- RAPIDJSON_STRING_(Not, 'n', 'o', 't')
- RAPIDJSON_STRING_(Properties, 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's')
- RAPIDJSON_STRING_(Required, 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd')
- RAPIDJSON_STRING_(Dependencies, 'd', 'e', 'p', 'e', 'n', 'd', 'e', 'n', 'c', 'i', 'e', 's')
- RAPIDJSON_STRING_(PatternProperties, 'p', 'a', 't', 't', 'e', 'r', 'n', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's')
- RAPIDJSON_STRING_(AdditionalProperties, 'a', 'd', 'd', 'i', 't', 'i', 'o', 'n', 'a', 'l', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's')
- RAPIDJSON_STRING_(MinProperties, 'm', 'i', 'n', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's')
- RAPIDJSON_STRING_(MaxProperties, 'm', 'a', 'x', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's')
- RAPIDJSON_STRING_(Items, 'i', 't', 'e', 'm', 's')
- RAPIDJSON_STRING_(MinItems, 'm', 'i', 'n', 'I', 't', 'e', 'm', 's')
- RAPIDJSON_STRING_(MaxItems, 'm', 'a', 'x', 'I', 't', 'e', 'm', 's')
- RAPIDJSON_STRING_(AdditionalItems, 'a', 'd', 'd', 'i', 't', 'i', 'o', 'n', 'a', 'l', 'I', 't', 'e', 'm', 's')
- RAPIDJSON_STRING_(UniqueItems, 'u', 'n', 'i', 'q', 'u', 'e', 'I', 't', 'e', 'm', 's')
- RAPIDJSON_STRING_(MinLength, 'm', 'i', 'n', 'L', 'e', 'n', 'g', 't', 'h')
- RAPIDJSON_STRING_(MaxLength, 'm', 'a', 'x', 'L', 'e', 'n', 'g', 't', 'h')
- RAPIDJSON_STRING_(Pattern, 'p', 'a', 't', 't', 'e', 'r', 'n')
- RAPIDJSON_STRING_(Minimum, 'm', 'i', 'n', 'i', 'm', 'u', 'm')
- RAPIDJSON_STRING_(Maximum, 'm', 'a', 'x', 'i', 'm', 'u', 'm')
- RAPIDJSON_STRING_(ExclusiveMinimum, 'e', 'x', 'c', 'l', 'u', 's', 'i', 'v', 'e', 'M', 'i', 'n', 'i', 'm', 'u', 'm')
- RAPIDJSON_STRING_(ExclusiveMaximum, 'e', 'x', 'c', 'l', 'u', 's', 'i', 'v', 'e', 'M', 'a', 'x', 'i', 'm', 'u', 'm')
- RAPIDJSON_STRING_(MultipleOf, 'm', 'u', 'l', 't', 'i', 'p', 'l', 'e', 'O', 'f')
- RAPIDJSON_STRING_(DefaultValue, 'd', 'e', 'f', 'a', 'u', 'l', 't')
-
-#undef RAPIDJSON_STRING_
-
-private:
- enum SchemaValueType {
- kNullSchemaType,
- kBooleanSchemaType,
- kObjectSchemaType,
- kArraySchemaType,
- kStringSchemaType,
- kNumberSchemaType,
- kIntegerSchemaType,
- kTotalSchemaType
- };
-
-#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX
- typedef internal::GenericRegex<EncodingType, AllocatorType> RegexType;
-#elif RAPIDJSON_SCHEMA_USE_STDREGEX
- typedef std::basic_regex<Ch> RegexType;
-#else
- typedef char RegexType;
-#endif
-
- struct SchemaArray {
- SchemaArray() : schemas(), count() {}
- ~SchemaArray() { AllocatorType::Free(schemas); }
- const SchemaType** schemas;
- SizeType begin; // begin index of context.validators
- SizeType count;
- };
-
- template <typename V1, typename V2>
- void AddUniqueElement(V1& a, const V2& v) {
- for (typename V1::ConstValueIterator itr = a.Begin(); itr != a.End(); ++itr)
- if (*itr == v)
- return;
- V1 c(v, *allocator_);
- a.PushBack(c, *allocator_);
- }
-
- static const ValueType* GetMember(const ValueType& value, const ValueType& name) {
- typename ValueType::ConstMemberIterator itr = value.FindMember(name);
- return itr != value.MemberEnd() ? &(itr->value) : 0;
- }
-
- static void AssignIfExist(bool& out, const ValueType& value, const ValueType& name) {
- if (const ValueType* v = GetMember(value, name))
- if (v->IsBool())
- out = v->GetBool();
- }
-
- static void AssignIfExist(SizeType& out, const ValueType& value, const ValueType& name) {
- if (const ValueType* v = GetMember(value, name))
- if (v->IsUint64() && v->GetUint64() <= SizeType(~0))
- out = static_cast<SizeType>(v->GetUint64());
- }
-
- void AssignIfExist(SchemaArray& out, SchemaDocumentType& schemaDocument, const PointerType& p, const ValueType& value, const ValueType& name, const ValueType& document) {
- if (const ValueType* v = GetMember(value, name)) {
- if (v->IsArray() && v->Size() > 0) {
- PointerType q = p.Append(name, allocator_);
- out.count = v->Size();
- out.schemas = static_cast<const Schema**>(allocator_->Malloc(out.count * sizeof(const Schema*)));
- memset(out.schemas, 0, sizeof(Schema*)* out.count);
- for (SizeType i = 0; i < out.count; i++)
- schemaDocument.CreateSchema(&out.schemas[i], q.Append(i, allocator_), (*v)[i], document);
- out.begin = validatorCount_;
- validatorCount_ += out.count;
- }
- }
- }
-
-#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX
- template <typename ValueType>
- RegexType* CreatePattern(const ValueType& value) {
- if (value.IsString()) {
- RegexType* r = new (allocator_->Malloc(sizeof(RegexType))) RegexType(value.GetString(), allocator_);
- if (!r->IsValid()) {
- r->~RegexType();
- AllocatorType::Free(r);
- r = 0;
- }
- return r;
- }
- return 0;
- }
-
- static bool IsPatternMatch(const RegexType* pattern, const Ch *str, SizeType) {
- GenericRegexSearch<RegexType> rs(*pattern);
- return rs.Search(str);
- }
-#elif RAPIDJSON_SCHEMA_USE_STDREGEX
- template <typename ValueType>
- RegexType* CreatePattern(const ValueType& value) {
- if (value.IsString()) {
- RegexType *r = static_cast<RegexType*>(allocator_->Malloc(sizeof(RegexType)));
- try {
- return new (r) RegexType(value.GetString(), std::size_t(value.GetStringLength()), std::regex_constants::ECMAScript);
- }
- catch (const std::regex_error&) {
- AllocatorType::Free(r);
- }
- }
- return 0;
- }
-
- static bool IsPatternMatch(const RegexType* pattern, const Ch *str, SizeType length) {
- std::match_results<const Ch*> r;
- return std::regex_search(str, str + length, r, *pattern);
- }
-#else
- template <typename ValueType>
- RegexType* CreatePattern(const ValueType&) { return 0; }
-
- static bool IsPatternMatch(const RegexType*, const Ch *, SizeType) { return true; }
-#endif // RAPIDJSON_SCHEMA_USE_STDREGEX
-
- void AddType(const ValueType& type) {
- if (type == GetNullString() ) type_ |= 1 << kNullSchemaType;
- else if (type == GetBooleanString()) type_ |= 1 << kBooleanSchemaType;
- else if (type == GetObjectString() ) type_ |= 1 << kObjectSchemaType;
- else if (type == GetArrayString() ) type_ |= 1 << kArraySchemaType;
- else if (type == GetStringString() ) type_ |= 1 << kStringSchemaType;
- else if (type == GetIntegerString()) type_ |= 1 << kIntegerSchemaType;
- else if (type == GetNumberString() ) type_ |= (1 << kNumberSchemaType) | (1 << kIntegerSchemaType);
- }
-
- bool CreateParallelValidator(Context& context) const {
- if (enum_ || context.arrayUniqueness)
- context.hasher = context.factory.CreateHasher();
-
- if (validatorCount_) {
- RAPIDJSON_ASSERT(context.validators == 0);
- context.validators = static_cast<ISchemaValidator**>(context.factory.MallocState(sizeof(ISchemaValidator*) * validatorCount_));
- context.validatorCount = validatorCount_;
-
- if (allOf_.schemas)
- CreateSchemaValidators(context, allOf_);
-
- if (anyOf_.schemas)
- CreateSchemaValidators(context, anyOf_);
-
- if (oneOf_.schemas)
- CreateSchemaValidators(context, oneOf_);
-
- if (not_)
- context.validators[notValidatorIndex_] = context.factory.CreateSchemaValidator(*not_);
-
- if (hasSchemaDependencies_) {
- for (SizeType i = 0; i < propertyCount_; i++)
- if (properties_[i].dependenciesSchema)
- context.validators[properties_[i].dependenciesValidatorIndex] = context.factory.CreateSchemaValidator(*properties_[i].dependenciesSchema);
- }
- }
-
- return true;
- }
-
- void CreateSchemaValidators(Context& context, const SchemaArray& schemas) const {
- for (SizeType i = 0; i < schemas.count; i++)
- context.validators[schemas.begin + i] = context.factory.CreateSchemaValidator(*schemas.schemas[i]);
- }
-
- // O(n)
- bool FindPropertyIndex(const ValueType& name, SizeType* outIndex) const {
- SizeType len = name.GetStringLength();
- const Ch* str = name.GetString();
- for (SizeType index = 0; index < propertyCount_; index++)
- if (properties_[index].name.GetStringLength() == len &&
- (std::memcmp(properties_[index].name.GetString(), str, sizeof(Ch) * len) == 0))
- {
- *outIndex = index;
- return true;
- }
- return false;
- }
-
- bool CheckInt(Context& context, int64_t i) const {
- if (!(type_ & ((1 << kIntegerSchemaType) | (1 << kNumberSchemaType)))) {
- DisallowedType(context, GetIntegerString());
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString());
- }
-
- if (!minimum_.IsNull()) {
- if (minimum_.IsInt64()) {
- if (exclusiveMinimum_ ? i <= minimum_.GetInt64() : i < minimum_.GetInt64()) {
- context.error_handler.BelowMinimum(i, minimum_, exclusiveMinimum_);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString());
- }
- }
- else if (minimum_.IsUint64()) {
- context.error_handler.BelowMinimum(i, minimum_, exclusiveMinimum_);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); // i <= max(int64_t) < minimum.GetUint64()
- }
- else if (!CheckDoubleMinimum(context, static_cast<double>(i)))
- return false;
- }
-
- if (!maximum_.IsNull()) {
- if (maximum_.IsInt64()) {
- if (exclusiveMaximum_ ? i >= maximum_.GetInt64() : i > maximum_.GetInt64()) {
- context.error_handler.AboveMaximum(i, maximum_, exclusiveMaximum_);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString());
- }
- }
- else if (maximum_.IsUint64()) { }
- /* do nothing */ // i <= max(int64_t) < maximum_.GetUint64()
- else if (!CheckDoubleMaximum(context, static_cast<double>(i)))
- return false;
- }
-
- if (!multipleOf_.IsNull()) {
- if (multipleOf_.IsUint64()) {
- if (static_cast<uint64_t>(i >= 0 ? i : -i) % multipleOf_.GetUint64() != 0) {
- context.error_handler.NotMultipleOf(i, multipleOf_);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetMultipleOfString());
- }
- }
- else if (!CheckDoubleMultipleOf(context, static_cast<double>(i)))
- return false;
- }
-
- return true;
- }
-
- bool CheckUint(Context& context, uint64_t i) const {
- if (!(type_ & ((1 << kIntegerSchemaType) | (1 << kNumberSchemaType)))) {
- DisallowedType(context, GetIntegerString());
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString());
- }
-
- if (!minimum_.IsNull()) {
- if (minimum_.IsUint64()) {
- if (exclusiveMinimum_ ? i <= minimum_.GetUint64() : i < minimum_.GetUint64()) {
- context.error_handler.BelowMinimum(i, minimum_, exclusiveMinimum_);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString());
- }
- }
- else if (minimum_.IsInt64())
- /* do nothing */; // i >= 0 > minimum.Getint64()
- else if (!CheckDoubleMinimum(context, static_cast<double>(i)))
- return false;
- }
-
- if (!maximum_.IsNull()) {
- if (maximum_.IsUint64()) {
- if (exclusiveMaximum_ ? i >= maximum_.GetUint64() : i > maximum_.GetUint64()) {
- context.error_handler.AboveMaximum(i, maximum_, exclusiveMaximum_);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString());
- }
- }
- else if (maximum_.IsInt64()) {
- context.error_handler.AboveMaximum(i, maximum_, exclusiveMaximum_);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); // i >= 0 > maximum_
- }
- else if (!CheckDoubleMaximum(context, static_cast<double>(i)))
- return false;
- }
-
- if (!multipleOf_.IsNull()) {
- if (multipleOf_.IsUint64()) {
- if (i % multipleOf_.GetUint64() != 0) {
- context.error_handler.NotMultipleOf(i, multipleOf_);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetMultipleOfString());
- }
- }
- else if (!CheckDoubleMultipleOf(context, static_cast<double>(i)))
- return false;
- }
-
- return true;
- }
-
- bool CheckDoubleMinimum(Context& context, double d) const {
- if (exclusiveMinimum_ ? d <= minimum_.GetDouble() : d < minimum_.GetDouble()) {
- context.error_handler.BelowMinimum(d, minimum_, exclusiveMinimum_);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString());
- }
- return true;
- }
-
- bool CheckDoubleMaximum(Context& context, double d) const {
- if (exclusiveMaximum_ ? d >= maximum_.GetDouble() : d > maximum_.GetDouble()) {
- context.error_handler.AboveMaximum(d, maximum_, exclusiveMaximum_);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString());
- }
- return true;
- }
-
- bool CheckDoubleMultipleOf(Context& context, double d) const {
- double a = std::abs(d), b = std::abs(multipleOf_.GetDouble());
- double q = std::floor(a / b);
- double r = a - q * b;
- if (r > 0.0) {
- context.error_handler.NotMultipleOf(d, multipleOf_);
- RAPIDJSON_INVALID_KEYWORD_RETURN(GetMultipleOfString());
- }
- return true;
- }
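Editorial aside on the deleted code (context only): CheckDoubleMultipleOf computes a floor-based remainder rather than calling std::fmod. A worked instance with exactly representable doubles:

    double a = 7.5, b = 2.5;
    double q = std::floor(a / b); // 3.0
    double r = a - q * b;         // 0.0, so 7.5 validates against multipleOf 2.5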
-
- void DisallowedType(Context& context, const ValueType& actualType) const {
- ErrorHandler& eh = context.error_handler;
- eh.StartDisallowedType();
-
- if (type_ & (1 << kNullSchemaType)) eh.AddExpectedType(GetNullString());
- if (type_ & (1 << kBooleanSchemaType)) eh.AddExpectedType(GetBooleanString());
- if (type_ & (1 << kObjectSchemaType)) eh.AddExpectedType(GetObjectString());
- if (type_ & (1 << kArraySchemaType)) eh.AddExpectedType(GetArrayString());
- if (type_ & (1 << kStringSchemaType)) eh.AddExpectedType(GetStringString());
-
- if (type_ & (1 << kNumberSchemaType)) eh.AddExpectedType(GetNumberString());
- else if (type_ & (1 << kIntegerSchemaType)) eh.AddExpectedType(GetIntegerString());
-
- eh.EndDisallowedType(actualType);
- }
-
- struct Property {
- Property() : schema(), dependenciesSchema(), dependenciesValidatorIndex(), dependencies(), required(false) {}
- ~Property() { AllocatorType::Free(dependencies); }
- SValue name;
- const SchemaType* schema;
- const SchemaType* dependenciesSchema;
- SizeType dependenciesValidatorIndex;
- bool* dependencies;
- bool required;
- };
-
- struct PatternProperty {
- PatternProperty() : schema(), pattern() {}
- ~PatternProperty() {
- if (pattern) {
- pattern->~RegexType();
- AllocatorType::Free(pattern);
- }
- }
- const SchemaType* schema;
- RegexType* pattern;
- };
-
- AllocatorType* allocator_;
- SValue uri_;
- PointerType pointer_;
- const SchemaType* typeless_;
- uint64_t* enum_;
- SizeType enumCount_;
- SchemaArray allOf_;
- SchemaArray anyOf_;
- SchemaArray oneOf_;
- const SchemaType* not_;
- unsigned type_; // bitmask of kSchemaType
- SizeType validatorCount_;
- SizeType notValidatorIndex_;
-
- Property* properties_;
- const SchemaType* additionalPropertiesSchema_;
- PatternProperty* patternProperties_;
- SizeType patternPropertyCount_;
- SizeType propertyCount_;
- SizeType minProperties_;
- SizeType maxProperties_;
- bool additionalProperties_;
- bool hasDependencies_;
- bool hasRequired_;
- bool hasSchemaDependencies_;
-
- const SchemaType* additionalItemsSchema_;
- const SchemaType* itemsList_;
- const SchemaType** itemsTuple_;
- SizeType itemsTupleCount_;
- SizeType minItems_;
- SizeType maxItems_;
- bool additionalItems_;
- bool uniqueItems_;
-
- RegexType* pattern_;
- SizeType minLength_;
- SizeType maxLength_;
-
- SValue minimum_;
- SValue maximum_;
- SValue multipleOf_;
- bool exclusiveMinimum_;
- bool exclusiveMaximum_;
-
- SizeType defaultValueLength_;
-};
-
-template<typename Stack, typename Ch>
-struct TokenHelper {
- RAPIDJSON_FORCEINLINE static void AppendIndexToken(Stack& documentStack, SizeType index) {
- *documentStack.template Push<Ch>() = '/';
- char buffer[21];
- size_t length = static_cast<size_t>((sizeof(SizeType) == 4 ? u32toa(index, buffer) : u64toa(index, buffer)) - buffer);
- for (size_t i = 0; i < length; i++)
- *documentStack.template Push<Ch>() = static_cast<Ch>(buffer[i]);
- }
-};
-
-// Partially specialized version for char to avoid buffer copying.
-template <typename Stack>
-struct TokenHelper<Stack, char> {
- RAPIDJSON_FORCEINLINE static void AppendIndexToken(Stack& documentStack, SizeType index) {
- if (sizeof(SizeType) == 4) {
- char *buffer = documentStack.template Push<char>(1 + 10); // '/' + uint
- *buffer++ = '/';
- const char* end = internal::u32toa(index, buffer);
- documentStack.template Pop<char>(static_cast<size_t>(10 - (end - buffer)));
- }
- else {
- char *buffer = documentStack.template Push<char>(1 + 20); // '/' + uint64
- *buffer++ = '/';
- const char* end = internal::u64toa(index, buffer);
- documentStack.template Pop<char>(static_cast<size_t>(20 - (end - buffer)));
- }
- }
-};
-
-} // namespace internal
-
-///////////////////////////////////////////////////////////////////////////////
-// IGenericRemoteSchemaDocumentProvider
-
-template <typename SchemaDocumentType>
-class IGenericRemoteSchemaDocumentProvider {
-public:
- typedef typename SchemaDocumentType::Ch Ch;
-
- virtual ~IGenericRemoteSchemaDocumentProvider() {}
- virtual const SchemaDocumentType* GetRemoteDocument(const Ch* uri, SizeType length) = 0;
-};
-
-///////////////////////////////////////////////////////////////////////////////
-// GenericSchemaDocument
-
-//! JSON schema document.
-/*!
- A JSON schema document is a compiled version of a JSON schema.
- It is basically a tree of internal::Schema.
-
- \note This is an immutable class (i.e. its instance cannot be modified after construction).
-    \tparam ValueT Type of JSON value (e.g. \c Value ), which also determines the encoding.
- \tparam Allocator Allocator type for allocating memory of this document.
-*/
-template <typename ValueT, typename Allocator = CrtAllocator>
-class GenericSchemaDocument {
-public:
- typedef ValueT ValueType;
- typedef IGenericRemoteSchemaDocumentProvider<GenericSchemaDocument> IRemoteSchemaDocumentProviderType;
- typedef Allocator AllocatorType;
- typedef typename ValueType::EncodingType EncodingType;
- typedef typename EncodingType::Ch Ch;
- typedef internal::Schema<GenericSchemaDocument> SchemaType;
- typedef GenericPointer<ValueType, Allocator> PointerType;
- typedef GenericValue<EncodingType, Allocator> URIType;
- friend class internal::Schema<GenericSchemaDocument>;
- template <typename, typename, typename>
- friend class GenericSchemaValidator;
-
- //! Constructor.
- /*!
- Compile a JSON document into schema document.
-
- \param document A JSON document as source.
- \param uri The base URI of this schema document for purposes of violation reporting.
-        \param uriLength Length of \c uri, in code points.
-        \param remoteProvider An optional remote schema document provider for resolving remote references. Can be null.
- \param allocator An optional allocator instance for allocating memory. Can be null.
- */
- explicit GenericSchemaDocument(const ValueType& document, const Ch* uri = 0, SizeType uriLength = 0,
- IRemoteSchemaDocumentProviderType* remoteProvider = 0, Allocator* allocator = 0) :
- remoteProvider_(remoteProvider),
- allocator_(allocator),
- ownAllocator_(),
- root_(),
- typeless_(),
- schemaMap_(allocator, kInitialSchemaMapSize),
- schemaRef_(allocator, kInitialSchemaRefSize)
- {
- if (!allocator_)
- ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)();
-
- Ch noUri[1] = {0};
- uri_.SetString(uri ? uri : noUri, uriLength, *allocator_);
-
- typeless_ = static_cast<SchemaType*>(allocator_->Malloc(sizeof(SchemaType)));
- new (typeless_) SchemaType(this, PointerType(), ValueType(kObjectType).Move(), ValueType(kObjectType).Move(), allocator_);
-
-        // Generate the root schema; this calls CreateSchema() to create sub-schemas,
-        // and AddRefSchema() whenever a $ref is encountered.
- CreateSchemaRecursive(&root_, PointerType(), document, document);
-
- // Resolve $ref
- while (!schemaRef_.Empty()) {
- SchemaRefEntry* refEntry = schemaRef_.template Pop<SchemaRefEntry>(1);
- if (const SchemaType* s = GetSchema(refEntry->target)) {
- if (refEntry->schema)
- *refEntry->schema = s;
-
-                // Create an entry in the map if it does not exist
- if (!GetSchema(refEntry->source)) {
- new (schemaMap_.template Push<SchemaEntry>()) SchemaEntry(refEntry->source, const_cast<SchemaType*>(s), false, allocator_);
- }
- }
- else if (refEntry->schema)
- *refEntry->schema = typeless_;
-
- refEntry->~SchemaRefEntry();
- }
-
- RAPIDJSON_ASSERT(root_ != 0);
-
- schemaRef_.ShrinkToFit(); // Deallocate all memory for ref
- }
-
-#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
- //! Move constructor in C++11
- GenericSchemaDocument(GenericSchemaDocument&& rhs) RAPIDJSON_NOEXCEPT :
- remoteProvider_(rhs.remoteProvider_),
- allocator_(rhs.allocator_),
- ownAllocator_(rhs.ownAllocator_),
- root_(rhs.root_),
- typeless_(rhs.typeless_),
- schemaMap_(std::move(rhs.schemaMap_)),
- schemaRef_(std::move(rhs.schemaRef_)),
- uri_(std::move(rhs.uri_))
- {
- rhs.remoteProvider_ = 0;
- rhs.allocator_ = 0;
- rhs.ownAllocator_ = 0;
- rhs.typeless_ = 0;
- }
-#endif
-
- //! Destructor
- ~GenericSchemaDocument() {
- while (!schemaMap_.Empty())
- schemaMap_.template Pop<SchemaEntry>(1)->~SchemaEntry();
-
- if (typeless_) {
- typeless_->~SchemaType();
- Allocator::Free(typeless_);
- }
-
- RAPIDJSON_DELETE(ownAllocator_);
- }
-
- const URIType& GetURI() const { return uri_; }
-
- //! Get the root schema.
- const SchemaType& GetRoot() const { return *root_; }
-
-private:
- //! Prohibit copying
- GenericSchemaDocument(const GenericSchemaDocument&);
- //! Prohibit assignment
- GenericSchemaDocument& operator=(const GenericSchemaDocument&);
-
- struct SchemaRefEntry {
- SchemaRefEntry(const PointerType& s, const PointerType& t, const SchemaType** outSchema, Allocator *allocator) : source(s, allocator), target(t, allocator), schema(outSchema) {}
- PointerType source;
- PointerType target;
- const SchemaType** schema;
- };
-
- struct SchemaEntry {
- SchemaEntry(const PointerType& p, SchemaType* s, bool o, Allocator* allocator) : pointer(p, allocator), schema(s), owned(o) {}
- ~SchemaEntry() {
- if (owned) {
- schema->~SchemaType();
- Allocator::Free(schema);
- }
- }
- PointerType pointer;
- SchemaType* schema;
- bool owned;
- };
-
- void CreateSchemaRecursive(const SchemaType** schema, const PointerType& pointer, const ValueType& v, const ValueType& document) {
- if (schema)
- *schema = typeless_;
-
- if (v.GetType() == kObjectType) {
- const SchemaType* s = GetSchema(pointer);
- if (!s)
- CreateSchema(schema, pointer, v, document);
-
- for (typename ValueType::ConstMemberIterator itr = v.MemberBegin(); itr != v.MemberEnd(); ++itr)
- CreateSchemaRecursive(0, pointer.Append(itr->name, allocator_), itr->value, document);
- }
- else if (v.GetType() == kArrayType)
- for (SizeType i = 0; i < v.Size(); i++)
- CreateSchemaRecursive(0, pointer.Append(i, allocator_), v[i], document);
- }
-
- void CreateSchema(const SchemaType** schema, const PointerType& pointer, const ValueType& v, const ValueType& document) {
- RAPIDJSON_ASSERT(pointer.IsValid());
- if (v.IsObject()) {
- if (!HandleRefSchema(pointer, schema, v, document)) {
- SchemaType* s = new (allocator_->Malloc(sizeof(SchemaType))) SchemaType(this, pointer, v, document, allocator_);
- new (schemaMap_.template Push<SchemaEntry>()) SchemaEntry(pointer, s, true, allocator_);
- if (schema)
- *schema = s;
- }
- }
- }
-
- bool HandleRefSchema(const PointerType& source, const SchemaType** schema, const ValueType& v, const ValueType& document) {
- static const Ch kRefString[] = { '$', 'r', 'e', 'f', '\0' };
- static const ValueType kRefValue(kRefString, 4);
-
- typename ValueType::ConstMemberIterator itr = v.FindMember(kRefValue);
- if (itr == v.MemberEnd())
- return false;
-
- if (itr->value.IsString()) {
- SizeType len = itr->value.GetStringLength();
- if (len > 0) {
- const Ch* s = itr->value.GetString();
- SizeType i = 0;
- while (i < len && s[i] != '#') // Find the first #
- i++;
-
- if (i > 0) { // Remote reference, resolve immediately
- if (remoteProvider_) {
- if (const GenericSchemaDocument* remoteDocument = remoteProvider_->GetRemoteDocument(s, i)) {
- PointerType pointer(&s[i], len - i, allocator_);
- if (pointer.IsValid()) {
- if (const SchemaType* sc = remoteDocument->GetSchema(pointer)) {
- if (schema)
- *schema = sc;
- new (schemaMap_.template Push<SchemaEntry>()) SchemaEntry(source, const_cast<SchemaType*>(sc), false, allocator_);
- return true;
- }
- }
- }
- }
- }
- else if (s[i] == '#') { // Local reference, defer resolution
- PointerType pointer(&s[i], len - i, allocator_);
- if (pointer.IsValid()) {
- if (const ValueType* nv = pointer.Get(document))
- if (HandleRefSchema(source, schema, *nv, document))
- return true;
-
- new (schemaRef_.template Push<SchemaRefEntry>()) SchemaRefEntry(source, pointer, schema, allocator_);
- return true;
- }
- }
- }
- }
- return false;
- }
-
- const SchemaType* GetSchema(const PointerType& pointer) const {
- for (const SchemaEntry* target = schemaMap_.template Bottom<SchemaEntry>(); target != schemaMap_.template End<SchemaEntry>(); ++target)
- if (pointer == target->pointer)
- return target->schema;
- return 0;
- }
-
- PointerType GetPointer(const SchemaType* schema) const {
- for (const SchemaEntry* target = schemaMap_.template Bottom<SchemaEntry>(); target != schemaMap_.template End<SchemaEntry>(); ++target)
- if (schema == target->schema)
- return target->pointer;
- return PointerType();
- }
-
- const SchemaType* GetTypeless() const { return typeless_; }
-
- static const size_t kInitialSchemaMapSize = 64;
- static const size_t kInitialSchemaRefSize = 64;
-
- IRemoteSchemaDocumentProviderType* remoteProvider_;
- Allocator *allocator_;
- Allocator *ownAllocator_;
- const SchemaType* root_; //!< Root schema.
- SchemaType* typeless_;
- internal::Stack<Allocator> schemaMap_; // Stores created Pointer -> Schemas
- internal::Stack<Allocator> schemaRef_; // Stores Pointer from $ref and schema which holds the $ref
- URIType uri_;
-};
-
-//! GenericSchemaDocument using Value type.
-typedef GenericSchemaDocument<Value> SchemaDocument;
-//! IGenericRemoteSchemaDocumentProvider using SchemaDocument.
-typedef IGenericRemoteSchemaDocumentProvider<SchemaDocument> IRemoteSchemaDocumentProvider;
-
-///////////////////////////////////////////////////////////////////////////////
-// GenericSchemaValidator
-
-//! JSON Schema Validator.
-/*!
- A SAX style JSON schema validator.
- It uses a \c GenericSchemaDocument to validate SAX events.
- It delegates the incoming SAX events to an output handler.
- The default output handler does nothing.
- It can be reused multiple times by calling \c Reset().
-
- \tparam SchemaDocumentType Type of schema document.
- \tparam OutputHandler Type of output handler. Default handler does nothing.
- \tparam StateAllocator Allocator for storing the internal validation states.
-*/
-template <
- typename SchemaDocumentType,
- typename OutputHandler = BaseReaderHandler<typename SchemaDocumentType::SchemaType::EncodingType>,
- typename StateAllocator = CrtAllocator>
-class GenericSchemaValidator :
- public internal::ISchemaStateFactory<typename SchemaDocumentType::SchemaType>,
- public internal::ISchemaValidator,
- public internal::IValidationErrorHandler<typename SchemaDocumentType::SchemaType>
-{
-public:
- typedef typename SchemaDocumentType::SchemaType SchemaType;
- typedef typename SchemaDocumentType::PointerType PointerType;
- typedef typename SchemaType::EncodingType EncodingType;
- typedef typename SchemaType::SValue SValue;
- typedef typename EncodingType::Ch Ch;
- typedef GenericStringRef<Ch> StringRefType;
- typedef GenericValue<EncodingType, StateAllocator> ValueType;
-
- //! Constructor without output handler.
- /*!
- \param schemaDocument The schema document to conform to.
- \param allocator Optional allocator for storing internal validation states.
- \param schemaStackCapacity Optional initial capacity of schema path stack.
- \param documentStackCapacity Optional initial capacity of document path stack.
- */
- GenericSchemaValidator(
- const SchemaDocumentType& schemaDocument,
- StateAllocator* allocator = 0,
- size_t schemaStackCapacity = kDefaultSchemaStackCapacity,
- size_t documentStackCapacity = kDefaultDocumentStackCapacity)
- :
- schemaDocument_(&schemaDocument),
- root_(schemaDocument.GetRoot()),
- stateAllocator_(allocator),
- ownStateAllocator_(0),
- schemaStack_(allocator, schemaStackCapacity),
- documentStack_(allocator, documentStackCapacity),
- outputHandler_(0),
- error_(kObjectType),
- currentError_(),
- missingDependents_(),
- valid_(true)
-#if RAPIDJSON_SCHEMA_VERBOSE
- , depth_(0)
-#endif
- {
- }
-
- //! Constructor with output handler.
- /*!
- \param schemaDocument The schema document to conform to.
- \param allocator Optional allocator for storing internal validation states.
- \param schemaStackCapacity Optional initial capacity of schema path stack.
- \param documentStackCapacity Optional initial capacity of document path stack.
- */
- GenericSchemaValidator(
- const SchemaDocumentType& schemaDocument,
- OutputHandler& outputHandler,
- StateAllocator* allocator = 0,
- size_t schemaStackCapacity = kDefaultSchemaStackCapacity,
- size_t documentStackCapacity = kDefaultDocumentStackCapacity)
- :
- schemaDocument_(&schemaDocument),
- root_(schemaDocument.GetRoot()),
- stateAllocator_(allocator),
- ownStateAllocator_(0),
- schemaStack_(allocator, schemaStackCapacity),
- documentStack_(allocator, documentStackCapacity),
- outputHandler_(&outputHandler),
- error_(kObjectType),
- currentError_(),
- missingDependents_(),
- valid_(true)
-#if RAPIDJSON_SCHEMA_VERBOSE
- , depth_(0)
-#endif
- {
- }
-
- //! Destructor.
- ~GenericSchemaValidator() {
- Reset();
- RAPIDJSON_DELETE(ownStateAllocator_);
- }
-
- //! Reset the internal states.
- void Reset() {
- while (!schemaStack_.Empty())
- PopSchema();
- documentStack_.Clear();
- error_.SetObject();
- currentError_.SetNull();
- missingDependents_.SetNull();
- valid_ = true;
- }
-
- //! Checks whether the current state is valid.
- // Implementation of ISchemaValidator
- virtual bool IsValid() const { return valid_; }
-
- //! Gets the error object.
- ValueType& GetError() { return error_; }
- const ValueType& GetError() const { return error_; }
-
-    //! Gets the JSON pointer pointing to the invalid schema.
- PointerType GetInvalidSchemaPointer() const {
- return schemaStack_.Empty() ? PointerType() : CurrentSchema().GetPointer();
- }
-
-    //! Gets the keyword of the invalid schema.
- const Ch* GetInvalidSchemaKeyword() const {
- return schemaStack_.Empty() ? 0 : CurrentContext().invalidKeyword;
- }
-
-    //! Gets the JSON pointer pointing to the invalid value.
- PointerType GetInvalidDocumentPointer() const {
- if (documentStack_.Empty()) {
- return PointerType();
- }
- else {
- return PointerType(documentStack_.template Bottom<Ch>(), documentStack_.GetSize() / sizeof(Ch));
- }
- }
-
- void NotMultipleOf(int64_t actual, const SValue& expected) {
- AddNumberError(SchemaType::GetMultipleOfString(), ValueType(actual).Move(), expected);
- }
- void NotMultipleOf(uint64_t actual, const SValue& expected) {
- AddNumberError(SchemaType::GetMultipleOfString(), ValueType(actual).Move(), expected);
- }
- void NotMultipleOf(double actual, const SValue& expected) {
- AddNumberError(SchemaType::GetMultipleOfString(), ValueType(actual).Move(), expected);
- }
- void AboveMaximum(int64_t actual, const SValue& expected, bool exclusive) {
- AddNumberError(SchemaType::GetMaximumString(), ValueType(actual).Move(), expected,
- exclusive ? &SchemaType::GetExclusiveMaximumString : 0);
- }
- void AboveMaximum(uint64_t actual, const SValue& expected, bool exclusive) {
- AddNumberError(SchemaType::GetMaximumString(), ValueType(actual).Move(), expected,
- exclusive ? &SchemaType::GetExclusiveMaximumString : 0);
- }
- void AboveMaximum(double actual, const SValue& expected, bool exclusive) {
- AddNumberError(SchemaType::GetMaximumString(), ValueType(actual).Move(), expected,
- exclusive ? &SchemaType::GetExclusiveMaximumString : 0);
- }
- void BelowMinimum(int64_t actual, const SValue& expected, bool exclusive) {
- AddNumberError(SchemaType::GetMinimumString(), ValueType(actual).Move(), expected,
- exclusive ? &SchemaType::GetExclusiveMinimumString : 0);
- }
- void BelowMinimum(uint64_t actual, const SValue& expected, bool exclusive) {
- AddNumberError(SchemaType::GetMinimumString(), ValueType(actual).Move(), expected,
- exclusive ? &SchemaType::GetExclusiveMinimumString : 0);
- }
- void BelowMinimum(double actual, const SValue& expected, bool exclusive) {
- AddNumberError(SchemaType::GetMinimumString(), ValueType(actual).Move(), expected,
- exclusive ? &SchemaType::GetExclusiveMinimumString : 0);
- }
-
- void TooLong(const Ch* str, SizeType length, SizeType expected) {
- AddNumberError(SchemaType::GetMaxLengthString(),
- ValueType(str, length, GetStateAllocator()).Move(), SValue(expected).Move());
- }
- void TooShort(const Ch* str, SizeType length, SizeType expected) {
- AddNumberError(SchemaType::GetMinLengthString(),
- ValueType(str, length, GetStateAllocator()).Move(), SValue(expected).Move());
- }
- void DoesNotMatch(const Ch* str, SizeType length) {
- currentError_.SetObject();
- currentError_.AddMember(GetActualString(), ValueType(str, length, GetStateAllocator()).Move(), GetStateAllocator());
- AddCurrentError(SchemaType::GetPatternString());
- }
-
- void DisallowedItem(SizeType index) {
- currentError_.SetObject();
- currentError_.AddMember(GetDisallowedString(), ValueType(index).Move(), GetStateAllocator());
- AddCurrentError(SchemaType::GetAdditionalItemsString(), true);
- }
- void TooFewItems(SizeType actualCount, SizeType expectedCount) {
- AddNumberError(SchemaType::GetMinItemsString(),
- ValueType(actualCount).Move(), SValue(expectedCount).Move());
- }
- void TooManyItems(SizeType actualCount, SizeType expectedCount) {
- AddNumberError(SchemaType::GetMaxItemsString(),
- ValueType(actualCount).Move(), SValue(expectedCount).Move());
- }
- void DuplicateItems(SizeType index1, SizeType index2) {
- ValueType duplicates(kArrayType);
- duplicates.PushBack(index1, GetStateAllocator());
- duplicates.PushBack(index2, GetStateAllocator());
- currentError_.SetObject();
- currentError_.AddMember(GetDuplicatesString(), duplicates, GetStateAllocator());
- AddCurrentError(SchemaType::GetUniqueItemsString(), true);
- }
-
- void TooManyProperties(SizeType actualCount, SizeType expectedCount) {
- AddNumberError(SchemaType::GetMaxPropertiesString(),
- ValueType(actualCount).Move(), SValue(expectedCount).Move());
- }
- void TooFewProperties(SizeType actualCount, SizeType expectedCount) {
- AddNumberError(SchemaType::GetMinPropertiesString(),
- ValueType(actualCount).Move(), SValue(expectedCount).Move());
- }
- void StartMissingProperties() {
- currentError_.SetArray();
- }
- void AddMissingProperty(const SValue& name) {
- currentError_.PushBack(ValueType(name, GetStateAllocator()).Move(), GetStateAllocator());
- }
- bool EndMissingProperties() {
- if (currentError_.Empty())
- return false;
- ValueType error(kObjectType);
- error.AddMember(GetMissingString(), currentError_, GetStateAllocator());
- currentError_ = error;
- AddCurrentError(SchemaType::GetRequiredString());
- return true;
- }
- void PropertyViolations(ISchemaValidator** subvalidators, SizeType count) {
- for (SizeType i = 0; i < count; ++i)
- MergeError(static_cast<GenericSchemaValidator*>(subvalidators[i])->GetError());
- }
- void DisallowedProperty(const Ch* name, SizeType length) {
- currentError_.SetObject();
- currentError_.AddMember(GetDisallowedString(), ValueType(name, length, GetStateAllocator()).Move(), GetStateAllocator());
- AddCurrentError(SchemaType::GetAdditionalPropertiesString(), true);
- }
-
- void StartDependencyErrors() {
- currentError_.SetObject();
- }
- void StartMissingDependentProperties() {
- missingDependents_.SetArray();
- }
- void AddMissingDependentProperty(const SValue& targetName) {
- missingDependents_.PushBack(ValueType(targetName, GetStateAllocator()).Move(), GetStateAllocator());
- }
- void EndMissingDependentProperties(const SValue& sourceName) {
- if (!missingDependents_.Empty())
- currentError_.AddMember(ValueType(sourceName, GetStateAllocator()).Move(),
- missingDependents_, GetStateAllocator());
- }
- void AddDependencySchemaError(const SValue& sourceName, ISchemaValidator* subvalidator) {
- currentError_.AddMember(ValueType(sourceName, GetStateAllocator()).Move(),
- static_cast<GenericSchemaValidator*>(subvalidator)->GetError(), GetStateAllocator());
- }
- bool EndDependencyErrors() {
- if (currentError_.ObjectEmpty())
- return false;
- ValueType error(kObjectType);
- error.AddMember(GetErrorsString(), currentError_, GetStateAllocator());
- currentError_ = error;
- AddCurrentError(SchemaType::GetDependenciesString());
- return true;
- }
-
- void DisallowedValue() {
- currentError_.SetObject();
- AddCurrentError(SchemaType::GetEnumString());
- }
- void StartDisallowedType() {
- currentError_.SetArray();
- }
- void AddExpectedType(const typename SchemaType::ValueType& expectedType) {
- currentError_.PushBack(ValueType(expectedType, GetStateAllocator()).Move(), GetStateAllocator());
- }
- void EndDisallowedType(const typename SchemaType::ValueType& actualType) {
- ValueType error(kObjectType);
- error.AddMember(GetExpectedString(), currentError_, GetStateAllocator());
- error.AddMember(GetActualString(), ValueType(actualType, GetStateAllocator()).Move(), GetStateAllocator());
- currentError_ = error;
- AddCurrentError(SchemaType::GetTypeString());
- }
- void NotAllOf(ISchemaValidator** subvalidators, SizeType count) {
- for (SizeType i = 0; i < count; ++i) {
- MergeError(static_cast<GenericSchemaValidator*>(subvalidators[i])->GetError());
- }
- }
- void NoneOf(ISchemaValidator** subvalidators, SizeType count) {
- AddErrorArray(SchemaType::GetAnyOfString(), subvalidators, count);
- }
- void NotOneOf(ISchemaValidator** subvalidators, SizeType count) {
- AddErrorArray(SchemaType::GetOneOfString(), subvalidators, count);
- }
- void Disallowed() {
- currentError_.SetObject();
- AddCurrentError(SchemaType::GetNotString());
- }
-
-#define RAPIDJSON_STRING_(name, ...) \
- static const StringRefType& Get##name##String() {\
- static const Ch s[] = { __VA_ARGS__, '\0' };\
- static const StringRefType v(s, static_cast<SizeType>(sizeof(s) / sizeof(Ch) - 1)); \
- return v;\
- }
-
- RAPIDJSON_STRING_(InstanceRef, 'i', 'n', 's', 't', 'a', 'n', 'c', 'e', 'R', 'e', 'f')
- RAPIDJSON_STRING_(SchemaRef, 's', 'c', 'h', 'e', 'm', 'a', 'R', 'e', 'f')
- RAPIDJSON_STRING_(Expected, 'e', 'x', 'p', 'e', 'c', 't', 'e', 'd')
- RAPIDJSON_STRING_(Actual, 'a', 'c', 't', 'u', 'a', 'l')
- RAPIDJSON_STRING_(Disallowed, 'd', 'i', 's', 'a', 'l', 'l', 'o', 'w', 'e', 'd')
- RAPIDJSON_STRING_(Missing, 'm', 'i', 's', 's', 'i', 'n', 'g')
- RAPIDJSON_STRING_(Errors, 'e', 'r', 'r', 'o', 'r', 's')
- RAPIDJSON_STRING_(Duplicates, 'd', 'u', 'p', 'l', 'i', 'c', 'a', 't', 'e', 's')
-
-#undef RAPIDJSON_STRING_
-
-#if RAPIDJSON_SCHEMA_VERBOSE
-#define RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_() \
-RAPIDJSON_MULTILINEMACRO_BEGIN\
- *documentStack_.template Push<Ch>() = '\0';\
- documentStack_.template Pop<Ch>(1);\
- internal::PrintInvalidDocument(documentStack_.template Bottom<Ch>());\
-RAPIDJSON_MULTILINEMACRO_END
-#else
-#define RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_()
-#endif
-
-#define RAPIDJSON_SCHEMA_HANDLE_BEGIN_(method, arg1)\
- if (!valid_) return false; \
- if (!BeginValue() || !CurrentSchema().method arg1) {\
- RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_();\
- return valid_ = false;\
- }
-
-#define RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(method, arg2)\
- for (Context* context = schemaStack_.template Bottom<Context>(); context != schemaStack_.template End<Context>(); context++) {\
- if (context->hasher)\
- static_cast<HasherType*>(context->hasher)->method arg2;\
- if (context->validators)\
- for (SizeType i_ = 0; i_ < context->validatorCount; i_++)\
- static_cast<GenericSchemaValidator*>(context->validators[i_])->method arg2;\
- if (context->patternPropertiesValidators)\
- for (SizeType i_ = 0; i_ < context->patternPropertiesValidatorCount; i_++)\
- static_cast<GenericSchemaValidator*>(context->patternPropertiesValidators[i_])->method arg2;\
- }
-
-#define RAPIDJSON_SCHEMA_HANDLE_END_(method, arg2)\
- return valid_ = EndValue() && (!outputHandler_ || outputHandler_->method arg2)
-
-#define RAPIDJSON_SCHEMA_HANDLE_VALUE_(method, arg1, arg2) \
- RAPIDJSON_SCHEMA_HANDLE_BEGIN_ (method, arg1);\
- RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(method, arg2);\
- RAPIDJSON_SCHEMA_HANDLE_END_ (method, arg2)
-
- bool Null() { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Null, (CurrentContext()), ( )); }
- bool Bool(bool b) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Bool, (CurrentContext(), b), (b)); }
- bool Int(int i) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Int, (CurrentContext(), i), (i)); }
- bool Uint(unsigned u) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Uint, (CurrentContext(), u), (u)); }
- bool Int64(int64_t i) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Int64, (CurrentContext(), i), (i)); }
- bool Uint64(uint64_t u) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Uint64, (CurrentContext(), u), (u)); }
- bool Double(double d) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Double, (CurrentContext(), d), (d)); }
- bool RawNumber(const Ch* str, SizeType length, bool copy)
- { RAPIDJSON_SCHEMA_HANDLE_VALUE_(String, (CurrentContext(), str, length, copy), (str, length, copy)); }
- bool String(const Ch* str, SizeType length, bool copy)
- { RAPIDJSON_SCHEMA_HANDLE_VALUE_(String, (CurrentContext(), str, length, copy), (str, length, copy)); }
-
- bool StartObject() {
- RAPIDJSON_SCHEMA_HANDLE_BEGIN_(StartObject, (CurrentContext()));
- RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(StartObject, ());
- return valid_ = !outputHandler_ || outputHandler_->StartObject();
- }
-
- bool Key(const Ch* str, SizeType len, bool copy) {
- if (!valid_) return false;
- AppendToken(str, len);
- if (!CurrentSchema().Key(CurrentContext(), str, len, copy)) return valid_ = false;
- RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(Key, (str, len, copy));
- return valid_ = !outputHandler_ || outputHandler_->Key(str, len, copy);
- }
-
- bool EndObject(SizeType memberCount) {
- if (!valid_) return false;
- RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(EndObject, (memberCount));
- if (!CurrentSchema().EndObject(CurrentContext(), memberCount)) return valid_ = false;
- RAPIDJSON_SCHEMA_HANDLE_END_(EndObject, (memberCount));
- }
-
- bool StartArray() {
- RAPIDJSON_SCHEMA_HANDLE_BEGIN_(StartArray, (CurrentContext()));
- RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(StartArray, ());
- return valid_ = !outputHandler_ || outputHandler_->StartArray();
- }
-
- bool EndArray(SizeType elementCount) {
- if (!valid_) return false;
- RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(EndArray, (elementCount));
- if (!CurrentSchema().EndArray(CurrentContext(), elementCount)) return valid_ = false;
- RAPIDJSON_SCHEMA_HANDLE_END_(EndArray, (elementCount));
- }
-
-#undef RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_
-#undef RAPIDJSON_SCHEMA_HANDLE_BEGIN_
-#undef RAPIDJSON_SCHEMA_HANDLE_PARALLEL_
-#undef RAPIDJSON_SCHEMA_HANDLE_VALUE_
-
- // Implementation of ISchemaStateFactory<SchemaType>
- virtual ISchemaValidator* CreateSchemaValidator(const SchemaType& root) {
- return new (GetStateAllocator().Malloc(sizeof(GenericSchemaValidator))) GenericSchemaValidator(*schemaDocument_, root, documentStack_.template Bottom<char>(), documentStack_.GetSize(),
-#if RAPIDJSON_SCHEMA_VERBOSE
- depth_ + 1,
-#endif
- &GetStateAllocator());
- }
-
- virtual void DestroySchemaValidator(ISchemaValidator* validator) {
- GenericSchemaValidator* v = static_cast<GenericSchemaValidator*>(validator);
- v->~GenericSchemaValidator();
- StateAllocator::Free(v);
- }
-
- virtual void* CreateHasher() {
- return new (GetStateAllocator().Malloc(sizeof(HasherType))) HasherType(&GetStateAllocator());
- }
-
- virtual uint64_t GetHashCode(void* hasher) {
- return static_cast<HasherType*>(hasher)->GetHashCode();
- }
-
- virtual void DestroryHasher(void* hasher) {
- HasherType* h = static_cast<HasherType*>(hasher);
- h->~HasherType();
- StateAllocator::Free(h);
- }
-
- virtual void* MallocState(size_t size) {
- return GetStateAllocator().Malloc(size);
- }
-
- virtual void FreeState(void* p) {
- StateAllocator::Free(p);
- }
-
-private:
- typedef typename SchemaType::Context Context;
- typedef GenericValue<UTF8<>, StateAllocator> HashCodeArray;
- typedef internal::Hasher<EncodingType, StateAllocator> HasherType;
-
- GenericSchemaValidator(
- const SchemaDocumentType& schemaDocument,
- const SchemaType& root,
- const char* basePath, size_t basePathSize,
-#if RAPIDJSON_SCHEMA_VERBOSE
- unsigned depth,
-#endif
- StateAllocator* allocator = 0,
- size_t schemaStackCapacity = kDefaultSchemaStackCapacity,
- size_t documentStackCapacity = kDefaultDocumentStackCapacity)
- :
- schemaDocument_(&schemaDocument),
- root_(root),
- stateAllocator_(allocator),
- ownStateAllocator_(0),
- schemaStack_(allocator, schemaStackCapacity),
- documentStack_(allocator, documentStackCapacity),
- outputHandler_(0),
- error_(kObjectType),
- currentError_(),
- missingDependents_(),
- valid_(true)
-#if RAPIDJSON_SCHEMA_VERBOSE
- , depth_(depth)
-#endif
- {
- if (basePath && basePathSize)
- memcpy(documentStack_.template Push<char>(basePathSize), basePath, basePathSize);
- }
-
- StateAllocator& GetStateAllocator() {
- if (!stateAllocator_)
- stateAllocator_ = ownStateAllocator_ = RAPIDJSON_NEW(StateAllocator)();
- return *stateAllocator_;
- }
-
- bool BeginValue() {
- if (schemaStack_.Empty())
- PushSchema(root_);
- else {
- if (CurrentContext().inArray)
- internal::TokenHelper<internal::Stack<StateAllocator>, Ch>::AppendIndexToken(documentStack_, CurrentContext().arrayElementIndex);
-
- if (!CurrentSchema().BeginValue(CurrentContext()))
- return false;
-
- SizeType count = CurrentContext().patternPropertiesSchemaCount;
- const SchemaType** sa = CurrentContext().patternPropertiesSchemas;
- typename Context::PatternValidatorType patternValidatorType = CurrentContext().valuePatternValidatorType;
- bool valueUniqueness = CurrentContext().valueUniqueness;
- RAPIDJSON_ASSERT(CurrentContext().valueSchema);
- PushSchema(*CurrentContext().valueSchema);
-
- if (count > 0) {
- CurrentContext().objectPatternValidatorType = patternValidatorType;
- ISchemaValidator**& va = CurrentContext().patternPropertiesValidators;
- SizeType& validatorCount = CurrentContext().patternPropertiesValidatorCount;
- va = static_cast<ISchemaValidator**>(MallocState(sizeof(ISchemaValidator*) * count));
- for (SizeType i = 0; i < count; i++)
- va[validatorCount++] = CreateSchemaValidator(*sa[i]);
- }
-
- CurrentContext().arrayUniqueness = valueUniqueness;
- }
- return true;
- }
-
- bool EndValue() {
- if (!CurrentSchema().EndValue(CurrentContext()))
- return false;
-
-#if RAPIDJSON_SCHEMA_VERBOSE
- GenericStringBuffer<EncodingType> sb;
- schemaDocument_->GetPointer(&CurrentSchema()).Stringify(sb);
-
- *documentStack_.template Push<Ch>() = '\0';
- documentStack_.template Pop<Ch>(1);
- internal::PrintValidatorPointers(depth_, sb.GetString(), documentStack_.template Bottom<Ch>());
-#endif
-
- uint64_t h = CurrentContext().arrayUniqueness ? static_cast<HasherType*>(CurrentContext().hasher)->GetHashCode() : 0;
-
- PopSchema();
-
- if (!schemaStack_.Empty()) {
- Context& context = CurrentContext();
- if (context.valueUniqueness) {
- HashCodeArray* a = static_cast<HashCodeArray*>(context.arrayElementHashCodes);
- if (!a)
- CurrentContext().arrayElementHashCodes = a = new (GetStateAllocator().Malloc(sizeof(HashCodeArray))) HashCodeArray(kArrayType);
- for (typename HashCodeArray::ConstValueIterator itr = a->Begin(); itr != a->End(); ++itr)
- if (itr->GetUint64() == h) {
- DuplicateItems(static_cast<SizeType>(itr - a->Begin()), a->Size());
- RAPIDJSON_INVALID_KEYWORD_RETURN(SchemaType::GetUniqueItemsString());
- }
- a->PushBack(h, GetStateAllocator());
- }
- }
-
-        // Remove the last token of the document pointer
- while (!documentStack_.Empty() && *documentStack_.template Pop<Ch>(1) != '/')
- ;
-
- return true;
- }
-
- void AppendToken(const Ch* str, SizeType len) {
- documentStack_.template Reserve<Ch>(1 + len * 2); // worst case all characters are escaped as two characters
- *documentStack_.template PushUnsafe<Ch>() = '/';
- for (SizeType i = 0; i < len; i++) {
- if (str[i] == '~') {
- *documentStack_.template PushUnsafe<Ch>() = '~';
- *documentStack_.template PushUnsafe<Ch>() = '0';
- }
- else if (str[i] == '/') {
- *documentStack_.template PushUnsafe<Ch>() = '~';
- *documentStack_.template PushUnsafe<Ch>() = '1';
- }
- else
- *documentStack_.template PushUnsafe<Ch>() = str[i];
- }
- }
-
- RAPIDJSON_FORCEINLINE void PushSchema(const SchemaType& schema) { new (schemaStack_.template Push<Context>()) Context(*this, *this, &schema); }
-
- RAPIDJSON_FORCEINLINE void PopSchema() {
- Context* c = schemaStack_.template Pop<Context>(1);
- if (HashCodeArray* a = static_cast<HashCodeArray*>(c->arrayElementHashCodes)) {
- a->~HashCodeArray();
- StateAllocator::Free(a);
- }
- c->~Context();
- }
-
- void AddErrorLocation(ValueType& result, bool parent) {
- GenericStringBuffer<EncodingType> sb;
- PointerType instancePointer = GetInvalidDocumentPointer();
- ((parent && instancePointer.GetTokenCount() > 0)
- ? PointerType(instancePointer.GetTokens(), instancePointer.GetTokenCount() - 1)
- : instancePointer).StringifyUriFragment(sb);
- ValueType instanceRef(sb.GetString(), static_cast<SizeType>(sb.GetSize() / sizeof(Ch)),
- GetStateAllocator());
- result.AddMember(GetInstanceRefString(), instanceRef, GetStateAllocator());
- sb.Clear();
- memcpy(sb.Push(CurrentSchema().GetURI().GetStringLength()),
- CurrentSchema().GetURI().GetString(),
- CurrentSchema().GetURI().GetStringLength() * sizeof(Ch));
- GetInvalidSchemaPointer().StringifyUriFragment(sb);
- ValueType schemaRef(sb.GetString(), static_cast<SizeType>(sb.GetSize() / sizeof(Ch)),
- GetStateAllocator());
- result.AddMember(GetSchemaRefString(), schemaRef, GetStateAllocator());
- }
-
- void AddError(ValueType& keyword, ValueType& error) {
- typename ValueType::MemberIterator member = error_.FindMember(keyword);
- if (member == error_.MemberEnd())
- error_.AddMember(keyword, error, GetStateAllocator());
- else {
- if (member->value.IsObject()) {
- ValueType errors(kArrayType);
- errors.PushBack(member->value, GetStateAllocator());
- member->value = errors;
- }
- member->value.PushBack(error, GetStateAllocator());
- }
- }
-
- void AddCurrentError(const typename SchemaType::ValueType& keyword, bool parent = false) {
- AddErrorLocation(currentError_, parent);
- AddError(ValueType(keyword, GetStateAllocator(), false).Move(), currentError_);
- }
-
- void MergeError(ValueType& other) {
- for (typename ValueType::MemberIterator it = other.MemberBegin(), end = other.MemberEnd(); it != end; ++it) {
- AddError(it->name, it->value);
- }
- }
-
- void AddNumberError(const typename SchemaType::ValueType& keyword, ValueType& actual, const SValue& expected,
- const typename SchemaType::ValueType& (*exclusive)() = 0) {
- currentError_.SetObject();
- currentError_.AddMember(GetActualString(), actual, GetStateAllocator());
- currentError_.AddMember(GetExpectedString(), ValueType(expected, GetStateAllocator()).Move(), GetStateAllocator());
- if (exclusive)
- currentError_.AddMember(ValueType(exclusive(), GetStateAllocator()).Move(), true, GetStateAllocator());
- AddCurrentError(keyword);
- }
-
- void AddErrorArray(const typename SchemaType::ValueType& keyword,
- ISchemaValidator** subvalidators, SizeType count) {
- ValueType errors(kArrayType);
- for (SizeType i = 0; i < count; ++i)
- errors.PushBack(static_cast<GenericSchemaValidator*>(subvalidators[i])->GetError(), GetStateAllocator());
- currentError_.SetObject();
- currentError_.AddMember(GetErrorsString(), errors, GetStateAllocator());
- AddCurrentError(keyword);
- }
-
- const SchemaType& CurrentSchema() const { return *schemaStack_.template Top<Context>()->schema; }
- Context& CurrentContext() { return *schemaStack_.template Top<Context>(); }
- const Context& CurrentContext() const { return *schemaStack_.template Top<Context>(); }
-
- static const size_t kDefaultSchemaStackCapacity = 1024;
- static const size_t kDefaultDocumentStackCapacity = 256;
- const SchemaDocumentType* schemaDocument_;
- const SchemaType& root_;
- StateAllocator* stateAllocator_;
- StateAllocator* ownStateAllocator_;
- internal::Stack<StateAllocator> schemaStack_; //!< stack to store the current path of schema (BaseSchemaType *)
- internal::Stack<StateAllocator> documentStack_; //!< stack to store the current path of validating document (Ch)
- OutputHandler* outputHandler_;
- ValueType error_;
- ValueType currentError_;
- ValueType missingDependents_;
- bool valid_;
-#if RAPIDJSON_SCHEMA_VERBOSE
- unsigned depth_;
-#endif
-};
-
-typedef GenericSchemaValidator<SchemaDocument> SchemaValidator;
-
-///////////////////////////////////////////////////////////////////////////////
-// SchemaValidatingReader
-
-//! A helper class for parsing with validation.
-/*!
- This helper class is a functor, designed as a parameter of \ref GenericDocument::Populate().
-
- \tparam parseFlags Combination of \ref ParseFlag.
- \tparam InputStream Type of input stream, implementing Stream concept.
- \tparam SourceEncoding Encoding of the input stream.
- \tparam SchemaDocumentType Type of schema document.
- \tparam StackAllocator Allocator type for stack.
-*/
-template <
- unsigned parseFlags,
- typename InputStream,
- typename SourceEncoding,
- typename SchemaDocumentType = SchemaDocument,
- typename StackAllocator = CrtAllocator>
-class SchemaValidatingReader {
-public:
- typedef typename SchemaDocumentType::PointerType PointerType;
- typedef typename InputStream::Ch Ch;
- typedef GenericValue<SourceEncoding, StackAllocator> ValueType;
-
- //! Constructor
- /*!
- \param is Input stream.
- \param sd Schema document.
- */
- SchemaValidatingReader(InputStream& is, const SchemaDocumentType& sd) : is_(is), sd_(sd), invalidSchemaKeyword_(), error_(kObjectType), isValid_(true) {}
-
- template <typename Handler>
- bool operator()(Handler& handler) {
- GenericReader<SourceEncoding, typename SchemaDocumentType::EncodingType, StackAllocator> reader;
- GenericSchemaValidator<SchemaDocumentType, Handler> validator(sd_, handler);
- parseResult_ = reader.template Parse<parseFlags>(is_, validator);
-
- isValid_ = validator.IsValid();
- if (isValid_) {
- invalidSchemaPointer_ = PointerType();
- invalidSchemaKeyword_ = 0;
- invalidDocumentPointer_ = PointerType();
- error_.SetObject();
- }
- else {
- invalidSchemaPointer_ = validator.GetInvalidSchemaPointer();
- invalidSchemaKeyword_ = validator.GetInvalidSchemaKeyword();
- invalidDocumentPointer_ = validator.GetInvalidDocumentPointer();
- error_.CopyFrom(validator.GetError(), allocator_);
- }
-
- return parseResult_;
- }
-
- const ParseResult& GetParseResult() const { return parseResult_; }
- bool IsValid() const { return isValid_; }
- const PointerType& GetInvalidSchemaPointer() const { return invalidSchemaPointer_; }
- const Ch* GetInvalidSchemaKeyword() const { return invalidSchemaKeyword_; }
- const PointerType& GetInvalidDocumentPointer() const { return invalidDocumentPointer_; }
- const ValueType& GetError() const { return error_; }
-
-private:
- InputStream& is_;
- const SchemaDocumentType& sd_;
-
- ParseResult parseResult_;
- PointerType invalidSchemaPointer_;
- const Ch* invalidSchemaKeyword_;
- PointerType invalidDocumentPointer_;
- StackAllocator allocator_;
- ValueType error_;
- bool isValid_;
-};
-
-RAPIDJSON_NAMESPACE_END
-RAPIDJSON_DIAG_POP
-
-#endif // RAPIDJSON_SCHEMA_H_
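
The header removed above implemented RapidJSON's two-step validation API: compile a
schema into an immutable SchemaDocument, then drive a GenericSchemaValidator as a SAX
handler over the instance document. A minimal sketch of that usage, relying only on
types and methods defined in the deleted header (SchemaDocument, SchemaValidator,
GetInvalidSchemaPointer, GetInvalidSchemaKeyword, GetInvalidDocumentPointer):

    #include "rapidjson/document.h"
    #include "rapidjson/schema.h"
    #include "rapidjson/stringbuffer.h"
    #include <cstdio>

    int main() {
        using namespace rapidjson;

        // A SchemaDocument is a compiled, immutable form of a JSON schema.
        Document sd;
        sd.Parse("{\"type\":\"object\",\"properties\":{\"n\":{\"type\":\"integer\",\"minimum\":0}},\"required\":[\"n\"]}");
        SchemaDocument schema(sd);

        // The validator is a SAX handler; Accept() replays the document into it.
        Document d;
        d.Parse("{\"n\":-1}");
        SchemaValidator validator(schema);
        if (!d.Accept(validator)) {
            StringBuffer sb;
            validator.GetInvalidSchemaPointer().StringifyUriFragment(sb);
            std::printf("invalid schema: %s\n", sb.GetString());                        // e.g. #/properties/n
            std::printf("violated keyword: %s\n", validator.GetInvalidSchemaKeyword()); // e.g. minimum
            sb.Clear();
            validator.GetInvalidDocumentPointer().StringifyUriFragment(sb);
            std::printf("invalid value at: %s\n", sb.GetString());                      // e.g. #/n
        }
        return 0;
    }

Reset() lets the same validator instance be reused for further documents, which is why
the class keeps its schema and document path stacks as members rather than locals.
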
diff --git a/src/native/external/rapidjson/stream.h b/src/native/external/rapidjson/stream.h
index 7f2643e48142..1fd70915c547 100644
--- a/src/native/external/rapidjson/stream.h
+++ b/src/native/external/rapidjson/stream.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
diff --git a/src/native/external/rapidjson/stringbuffer.h b/src/native/external/rapidjson/stringbuffer.h
index 4e38b82c3d98..82ad3ca6bbfe 100644
--- a/src/native/external/rapidjson/stringbuffer.h
+++ b/src/native/external/rapidjson/stringbuffer.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
diff --git a/src/native/external/rapidjson/writer.h b/src/native/external/rapidjson/writer.h
index 6f5b6903467a..632e02ce74a5 100644
--- a/src/native/external/rapidjson/writer.h
+++ b/src/native/external/rapidjson/writer.h
@@ -1,6 +1,6 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
-// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
@@ -16,6 +16,7 @@
#define RAPIDJSON_WRITER_H_
#include "stream.h"
+#include "internal/clzll.h"
#include "internal/meta.h"
#include "internal/stack.h"
#include "internal/strfunc.h"
@@ -66,6 +67,7 @@ enum WriteFlag {
kWriteNoFlags = 0, //!< No flags are set.
kWriteValidateEncodingFlag = 1, //!< Validate encoding of JSON strings.
kWriteNanAndInfFlag = 2, //!< Allow writing of Infinity, -Infinity and NaN.
+ kWriteNanAndInfNullFlag = 4, //!< Allow writing of Infinity, -Infinity and NaN as null.
kWriteDefaultFlags = RAPIDJSON_WRITE_DEFAULT_FLAGS //!< Default write flags. Can be customized by defining RAPIDJSON_WRITE_DEFAULT_FLAGS
};
@@ -226,7 +228,7 @@ public:
return Key(str.data(), SizeType(str.size()));
}
#endif
-
+
bool EndObject(SizeType memberCount = 0) {
(void)memberCount;
RAPIDJSON_ASSERT(level_stack_.GetSize() >= sizeof(Level)); // not inside an Object
@@ -282,6 +284,8 @@ public:
os_->Flush();
}
+ static const size_t kDefaultLevelDepth = 32;
+
protected:
//! Information for each nested level
struct Level {
@@ -290,8 +294,6 @@ protected:
bool inArray; //!< true if in array, otherwise in object
};
- static const size_t kDefaultLevelDepth = 32;
-
bool WriteNull() {
PutReserve(*os_, 4);
PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'u'); PutUnsafe(*os_, 'l'); PutUnsafe(*os_, 'l'); return true;
@@ -347,8 +349,13 @@ protected:
bool WriteDouble(double d) {
if (internal::Double(d).IsNanOrInf()) {
- if (!(writeFlags & kWriteNanAndInfFlag))
+ if (!(writeFlags & kWriteNanAndInfFlag) && !(writeFlags & kWriteNanAndInfNullFlag))
return false;
+ if (writeFlags & kWriteNanAndInfNullFlag) {
+ PutReserve(*os_, 4);
+ PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'u'); PutUnsafe(*os_, 'l'); PutUnsafe(*os_, 'l');
+ return true;
+ }
if (internal::Double(d).IsNan()) {
PutReserve(*os_, 3);
PutUnsafe(*os_, 'N'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'N');
@@ -547,6 +554,11 @@ inline bool Writer<StringBuffer>::WriteDouble(double d) {
// Note: This code path can only be reached if (RAPIDJSON_WRITE_DEFAULT_FLAGS & kWriteNanAndInfFlag).
if (!(kWriteDefaultFlags & kWriteNanAndInfFlag))
return false;
+ if (kWriteDefaultFlags & kWriteNanAndInfNullFlag) {
+ PutReserve(*os_, 4);
+ PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'u'); PutUnsafe(*os_, 'l'); PutUnsafe(*os_, 'l');
+ return true;
+ }
if (internal::Double(d).IsNan()) {
PutReserve(*os_, 3);
PutUnsafe(*os_, 'N'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'N');
@@ -668,19 +680,19 @@ inline bool Writer<StringBuffer>::ScanWriteUnescapedString(StringStream& is, siz
x = vorrq_u8(x, vcltq_u8(s, s3));
x = vrev64q_u8(x); // Rev in 64
- uint64_t low = vgetq_lane_u64(reinterpret_cast<uint64x2_t>(x), 0); // extract
- uint64_t high = vgetq_lane_u64(reinterpret_cast<uint64x2_t>(x), 1); // extract
+ uint64_t low = vgetq_lane_u64(vreinterpretq_u64_u8(x), 0); // extract
+ uint64_t high = vgetq_lane_u64(vreinterpretq_u64_u8(x), 1); // extract
SizeType len = 0;
bool escaped = false;
if (low == 0) {
if (high != 0) {
- unsigned lz = (unsigned)__builtin_clzll(high);
+ uint32_t lz = internal::clzll(high);
len = 8 + (lz >> 3);
escaped = true;
}
} else {
- unsigned lz = (unsigned)__builtin_clzll(low);
+ uint32_t lz = internal::clzll(low);
len = lz >> 3;
escaped = true;
}
diff --git a/src/tasks/WorkloadBuildTasks/PackageInstaller.cs b/src/tasks/WorkloadBuildTasks/PackageInstaller.cs
index 0d2f5db32906..a1b8583bd6e1 100644
--- a/src/tasks/WorkloadBuildTasks/PackageInstaller.cs
+++ b/src/tasks/WorkloadBuildTasks/PackageInstaller.cs
@@ -54,9 +54,18 @@ namespace Microsoft.Workload.Build.Tasks
Directory.CreateDirectory(projecDir);
- File.WriteAllText(Path.Combine(projecDir, "Directory.Build.props"), "<Project />");
- File.WriteAllText(Path.Combine(projecDir, "Directory.Packages.props"), "<Project />");
- File.WriteAllText(Path.Combine(projecDir, "Directory.Build.targets"), "<Project />");
+ File.WriteAllText(Path.Combine(projecDir, "Directory.Build.props"), """
+<Project>
+
+  <!-- This is an empty Directory.Build.props file to prevent projects that reside
+       under this directory from using any of the repository's local settings. -->
+ <PropertyGroup>
+ <ImportDirectoryPackagesProps>false</ImportDirectoryPackagesProps>
+ <ImportDirectoryBuildTargets>false</ImportDirectoryBuildTargets>
+ </PropertyGroup>
+
+</Project>
+""");
File.WriteAllText(projectPath, GenerateProject(references));
File.WriteAllText(Path.Combine(projecDir, "nuget.config"), _nugetConfigContents);
diff --git a/src/tests/Common/testenvironment.proj b/src/tests/Common/testenvironment.proj
index 7d0ba71411fa..e82110af21a8 100644
--- a/src/tests/Common/testenvironment.proj
+++ b/src/tests/Common/testenvironment.proj
@@ -80,7 +80,7 @@
RunningIlasmRoundTrip;
DOTNET_JitSynthesizeCounts;
DOTNET_JitCheckSynthesizedCounts;
- DOTNET_JitEnableCrossBlockLocalAssertionProp
+ DOTNET_JitRLCSEGreedy;
</DOTNETVariables>
</PropertyGroup>
<ItemGroup>
@@ -223,7 +223,6 @@
<TestEnvironment Include="jitobjectstackallocation" JitObjectStackAllocation="1" TieredCompilation="0" />
<TestEnvironment Include="jitphysicalpromotion_only" JitStressModeNames="STRESS_NO_OLD_PROMOTION" TieredCompilation="0" />
<TestEnvironment Include="jitphysicalpromotion_full" JitStressModeNames="STRESS_PHYSICAL_PROMOTION_COST STRESS_NO_OLD_PROMOTION" TieredCompilation="0" />
- <TestEnvironment Include="jitcrossblocklocalassertionprop" JitEnableCrossBlockLocalAssertionProp="1" TieredCompilation="0" />
<TestEnvironment Include="jitcfg" JitForceControlFlowGuard="1" />
<TestEnvironment Include="jitcfg_dispatcher_always" JitForceControlFlowGuard="1" JitCFGUseDispatcher="1" />
<TestEnvironment Include="jitcfg_dispatcher_never" JitForceControlFlowGuard="1" JitCFGUseDispatcher="0" />
@@ -239,6 +238,7 @@
<TestEnvironment Include="fullpgo_random_gdv_edge" TieredPGO="1" TieredCompilation="1" TC_QuickJitForLoops="1" ReadyToRun="0" JitRandomGuardedDevirtualization="1" JitRandomEdgeCounts="1" JitRandomlyCollect64BitCounts="1" />
<TestEnvironment Include="syntheticpgo" TieredCompilation="1" TC_QuickJitForLoops="1" ReadyToRun="0" JitSynthesizeCounts="1" JitCheckSynthesizedProfile="1" />
<TestEnvironment Include="syntheticpgo_blend" TieredPGO="1" TieredCompilation="1" TC_QuickJitForLoops="1" ReadyToRun="0" JitSynthesizeCounts="3" JitCheckSynthesizedProfile="1" />
+ <TestEnvironment Include="jitrlcse" JitRLCSEGreedy="1" />
<TestEnvironment Include="gcstandalone" Condition="'$(TargetsWindows)' == 'true'" GCName="clrgc.dll"/>
<TestEnvironment Include="gcstandalone" Condition="'$(TargetsWindows)' != 'true'" GCName="libclrgc.so"/>
<TestEnvironment Include="gcstandaloneserver" Condition="'$(TargetsWindows)' == 'true'" gcServer="1" GCName="clrgc.dll"/>
diff --git a/src/tests/Interop/MarshalAPI/FunctionPointer/GenericFunctionPointer.cs b/src/tests/Interop/MarshalAPI/FunctionPointer/GenericFunctionPointer.cs
index fe6d37f01a98..da2fc75d9138 100644
--- a/src/tests/Interop/MarshalAPI/FunctionPointer/GenericFunctionPointer.cs
+++ b/src/tests/Interop/MarshalAPI/FunctionPointer/GenericFunctionPointer.cs
@@ -29,6 +29,12 @@ public partial class FunctionPtr
return new() { X = Convert.ToInt32(arg) };
}
+ [UnmanagedCallersOnly]
+ static unsafe void UnmanagedExportedFunctionRefInt(int* pval, float arg)
+ {
+ *pval = Convert.ToInt32(arg);
+ }
+
class GenericCaller<T>
{
internal static unsafe T GenericCalli<U>(void* fnptr, U arg)
@@ -40,6 +46,11 @@ public partial class FunctionPtr
{
return ((delegate* unmanaged<U, BlittableGeneric<T>>)fnptr)(arg);
}
+
+ internal static unsafe void NonGenericCalli<U>(void* fnptr, ref int val, float arg)
+ {
+ ((delegate* unmanaged<ref int, float, void>)fnptr)(ref val, arg);
+ }
}
struct BlittableGeneric<T>
@@ -81,6 +92,14 @@ public partial class FunctionPtr
outVar = GenericCaller<string>.WrappedGenericCalli((delegate* unmanaged<float, BlittableGeneric<string>>)&UnmanagedExportedFunctionBlittableGenericString, inVal).X;
}
Assert.Equal(expectedValue, outVar);
+
+ outVar = 0;
+ Console.WriteLine("Testing non-GenericCalli with non-blittable argument in a generic caller");
+ unsafe
+ {
+ GenericCaller<string>.NonGenericCalli<string>((delegate* unmanaged<int*, float, void>)&UnmanagedExportedFunctionRefInt, ref outVar, inVal);
+ }
+ Assert.Equal(expectedValue, outVar);
}
[ConditionalFact(nameof(CanRunInvalidGenericFunctionPointerTest))]
diff --git a/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.cs b/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.cs
index 67a398d357e1..d4b81bafcd4c 100644
--- a/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.cs
+++ b/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.cs
@@ -26,6 +26,9 @@ public class ErrorHandlingTests
[DllImport(SwiftLib, EntryPoint = "$s18SwiftErrorHandling05getMyB7Message4from13messageLengthSPys6UInt16VGSgs0B0_p_s5Int32VztF")]
public unsafe static extern void* GetErrorMessage(void* handle, out int length);
+ [DllImport(SwiftLib, EntryPoint = "$s18SwiftErrorHandling16freeStringBuffer6bufferySpys6UInt16VG_tF")]
+ public unsafe static extern void FreeErrorMessageBuffer(void* stringPtr);
+
[Fact]
public unsafe static void TestSwiftErrorThrown()
{
@@ -99,7 +102,7 @@ public class ErrorHandlingTests
{
void* pointer = GetErrorMessage(error.Value, out int messageLength);
string errorMessage = Marshal.PtrToStringUni((IntPtr)pointer, messageLength);
- NativeMemory.Free((void*)pointer);
+ FreeErrorMessageBuffer(pointer);
return errorMessage;
}
}
diff --git a/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.csproj b/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.csproj
index 49be10b93939..89eda99352fd 100644
--- a/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.csproj
+++ b/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.csproj
@@ -5,8 +5,6 @@
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<!-- Swift interop is supported on Apple platforms only -->
<CLRTestTargetUnsupported Condition="'$(TargetsOSX)' != 'true' and '$(TargetsAppleMobile)' != 'true'">true</CLRTestTargetUnsupported>
- <!-- Tracking issue: https://github.com/dotnet/runtime/issues/93631 -->
- <CLRTestTargetUnsupported Condition="'$(RuntimeFlavor)' != 'mono'">true</CLRTestTargetUnsupported>
</PropertyGroup>
<ItemGroup>
<Compile Include="$(MSBuildProjectName).cs" />
diff --git a/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.swift b/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.swift
index 20022c0dba3e..5058014a42ce 100644
--- a/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.swift
+++ b/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.swift
@@ -33,3 +33,7 @@ public func getMyErrorMessage(from error: Error, messageLength: inout Int32) ->
}
return nil
}
+
+public func freeStringBuffer(buffer: UnsafeMutablePointer<unichar>) {
+ buffer.deallocate()
+}
diff --git a/src/tests/Interop/Swift/SwiftSelfContext/SwiftSelfContext.csproj b/src/tests/Interop/Swift/SwiftSelfContext/SwiftSelfContext.csproj
index 49be10b93939..89eda99352fd 100644
--- a/src/tests/Interop/Swift/SwiftSelfContext/SwiftSelfContext.csproj
+++ b/src/tests/Interop/Swift/SwiftSelfContext/SwiftSelfContext.csproj
@@ -5,8 +5,6 @@
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<!-- Swift interop is supported on Apple platforms only -->
<CLRTestTargetUnsupported Condition="'$(TargetsOSX)' != 'true' and '$(TargetsAppleMobile)' != 'true'">true</CLRTestTargetUnsupported>
- <!-- Tracking issue: https://github.com/dotnet/runtime/issues/93631 -->
- <CLRTestTargetUnsupported Condition="'$(RuntimeFlavor)' != 'mono'">true</CLRTestTargetUnsupported>
</PropertyGroup>
<ItemGroup>
<Compile Include="$(MSBuildProjectName).cs" />
diff --git a/src/tests/JIT/opt/Structs/MemsetMemcpyNullref.cs b/src/tests/JIT/opt/Structs/MemsetMemcpyNullref.cs
new file mode 100644
index 000000000000..0d18e7bf5351
--- /dev/null
+++ b/src/tests/JIT/opt/Structs/MemsetMemcpyNullref.cs
@@ -0,0 +1,80 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Runtime.CompilerServices;
+using Xunit;
+
+public unsafe class MemsetMemcpyNullref
+{
+ [Fact]
+ public static void MemsetMemcpyThrowNullRefonNull()
+ {
+ Assert.Throws<NullReferenceException>(() => MemoryInit(null));
+ Assert.Throws<NullReferenceException>(() => MemoryCopy(null, null));
+ Assert.Throws<NullReferenceException>(() =>
+ {
+ // Check when only src is null
+ HugeStruct hs = default;
+ MemoryCopy(&hs, null);
+ });
+ Assert.Throws<NullReferenceException>(() =>
+ {
+ // Check when only dst is null
+ HugeStruct hs = default;
+ MemoryCopy(null, &hs);
+ });
+
+ // Check various lengths
+ uint[] lengths = [1, 10, 100, 1000, 10000, 100000, 1000000];
+ foreach (uint length in lengths)
+ {
+ Assert.Throws<NullReferenceException>(() => MemoryInitByref(ref Unsafe.NullRef<byte>(), length));
+ Assert.Throws<NullReferenceException>(() => MemoryCopyByref(ref Unsafe.NullRef<byte>(), ref Unsafe.NullRef<byte>(), length));
+ }
+
+ // These APIs are not expected to fail/throw on zero length, even if pointers are not valid
+ byte valid = 0;
+ MemoryInitByref(ref Unsafe.NullRef<byte>(), 0);
+ MemoryCopyByref(ref Unsafe.NullRef<byte>(), ref valid, 0);
+ MemoryCopyByref(ref valid, ref Unsafe.NullRef<byte>(), 0);
+ MemoryCopyByref(ref Unsafe.NullRef<byte>(), ref Unsafe.NullRef<byte>(), 0);
+
+ byte valid2 = 0;
+ MemoryInitByrefZeroLen(ref valid);
+ MemoryInitByrefZeroLen(ref Unsafe.NullRef<byte>());
+ MemoryCopyByrefZeroLen(ref valid, ref valid2);
+ MemoryCopyByrefZeroLen(ref valid, ref Unsafe.NullRef<byte>());
+ MemoryCopyByrefZeroLen(ref Unsafe.NullRef<byte>(), ref valid2);
+ MemoryCopyByrefZeroLen(ref Unsafe.NullRef<byte>(), ref Unsafe.NullRef<byte>());
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private static void MemoryCopy(HugeStruct* dst, HugeStruct* src) =>
+ *dst = *src;
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private static void MemoryCopyByref(ref byte dst, ref byte src, uint len) =>
+ Unsafe.CopyBlockUnaligned(ref dst, ref src, len);
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private static void MemoryInit(HugeStruct* dst) =>
+ *dst = default;
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private static void MemoryInitByref(ref byte dst, uint len) =>
+ Unsafe.InitBlockUnaligned(ref dst, 42, len);
+
+ private struct HugeStruct
+ {
+ public fixed byte Data[20_000];
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private static void MemoryCopyByrefZeroLen(ref byte dst, ref byte src) =>
+ Unsafe.CopyBlockUnaligned(ref dst, ref src, 0);
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private static void MemoryInitByrefZeroLen(ref byte dst) =>
+ Unsafe.InitBlockUnaligned(ref dst, 42, 0);
+}
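The test pins down two JIT contracts for inlined memset/memcpy expansions: a non-zero-length operation through a null reference must surface NullReferenceException, and a zero-length operation must not touch memory at all, even when handed invalid references. A condensed sketch of that contract:

    using System.Runtime.CompilerServices;

    static class BlockOpContract
    {
        static void Demo()
        {
            // Zero length: legal even with a null reference; nothing is dereferenced.
            Unsafe.InitBlockUnaligned(ref Unsafe.NullRef<byte>(), 0x42, 0);

            // Any non-zero length on a null reference must raise
            // NullReferenceException, even when the JIT expands the block
            // operation inline instead of calling a helper.
            try { Unsafe.InitBlockUnaligned(ref Unsafe.NullRef<byte>(), 0x42, 1); }
            catch (NullReferenceException) { /* expected */ }
        }
    }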
diff --git a/src/tests/JIT/opt/Structs/MemsetMemcpyNullref.csproj b/src/tests/JIT/opt/Structs/MemsetMemcpyNullref.csproj
new file mode 100644
index 000000000000..23d7b90be536
--- /dev/null
+++ b/src/tests/JIT/opt/Structs/MemsetMemcpyNullref.csproj
@@ -0,0 +1,10 @@
+<Project Sdk="Microsoft.NET.Sdk">
+ <PropertyGroup>
+ <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
+ <DebugType>None</DebugType>
+ <Optimize>True</Optimize>
+ </PropertyGroup>
+ <ItemGroup>
+ <Compile Include="$(MSBuildProjectName).cs"/>
+ </ItemGroup>
+</Project>
diff --git a/src/tests/JIT/opt/Vectorization/BufferMemmoveTailCall.il b/src/tests/JIT/opt/Vectorization/BufferMemmoveTailCall.il
deleted file mode 100644
index 068f11ad7b61..000000000000
--- a/src/tests/JIT/opt/Vectorization/BufferMemmoveTailCall.il
+++ /dev/null
@@ -1,99 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-.assembly extern System.Runtime { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) }
-.assembly extern xunit.core {}
-.assembly extern System.Runtime.Extensions {}
-.assembly BufferMemmoveTailCall {
- // Allow access to private members of System.Private.CoreLib
- .custom instance void System.Runtime.CompilerServices.IgnoresAccessChecksToAttribute::.ctor(string) = (
- 01 00 16 53 79 73 74 65 6d 2e 50 72 69 76 61 74
- 65 2e 43 6f 72 65 4c 69 62 00 00
- )
-}
-
-.class public abstract auto ansi sealed beforefieldinit TailCallBufferMemmove
- extends [System.Runtime]System.Object
-{
- .method public hidebysig static int32 Main() cil managed
- {
- .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = (
- 01 00 00 00
- )
- .maxstack 8
- .entrypoint
-
- // C#:
- // byte[] src = new byte[32];
- // Test(ref src[0]);
-
- ldc.i4.s 32
- newarr [System.Runtime]System.Byte
- ldc.i4.0
- ldelema [System.Runtime]System.Byte
- call void TailCallBufferMemmove::Test(uint8&)
-
- // return 100;
- ldc.i4.s 100
- ret
- }
-
- .method private hidebysig static void Test (uint8& src) cil managed noinlining
- {
- .maxstack 3
-
- // C#:
- // byte* data = stackalloc byte[64]; // to trigger slow helper-based tail calls
- // Buffer.Memmove(ref Unsafe.AsRef<byte>(data), ref src, 64);
-
- ldc.i4.s 64
- conv.u
- localloc
- call !!0& [System.Runtime]System.Runtime.CompilerServices.Unsafe::AsRef<uint8>(void*)
- ldarg.0
- ldc.i4.s 64
- conv.i
- tail. call void [System.Runtime]System.Buffer::Memmove(uint8&, uint8&, native uint)
- ret
- }
-}
-
-// C#:
-// namespace System.Runtime.CompilerServices
-// {
-// public class IgnoresAccessChecksToAttribute : Attribute
-// {
-// public IgnoresAccessChecksToAttribute(string assemblyName)
-// {
-// AssemblyName = assemblyName;
-// }
-// public string AssemblyName { get; }
-// }
-// }
-//
-.class public auto ansi beforefieldinit System.Runtime.CompilerServices.IgnoresAccessChecksToAttribute
- extends [System.Runtime]System.Attribute
-{
- .field private initonly string '<AssemblyName>k__BackingField'
- .method public hidebysig specialname rtspecialname instance void .ctor (string assemblyName) cil managed
- {
- .maxstack 8
- ldarg.0
- call instance void [System.Runtime]System.Attribute::.ctor()
- ldarg.0
- ldarg.1
- stfld string System.Runtime.CompilerServices.IgnoresAccessChecksToAttribute::'<AssemblyName>k__BackingField'
- ret
- }
- .method public hidebysig specialname instance string get_AssemblyName () cil managed
- {
- .maxstack 8
- ldarg.0
- ldfld string System.Runtime.CompilerServices.IgnoresAccessChecksToAttribute::'<AssemblyName>k__BackingField'
- ret
- }
- .property instance string AssemblyName()
- {
- .get instance string System.Runtime.CompilerServices.IgnoresAccessChecksToAttribute::get_AssemblyName()
- }
-}
diff --git a/src/tests/JIT/opt/Vectorization/BufferMemmoveTailCall.ilproj b/src/tests/JIT/opt/Vectorization/BufferMemmoveTailCall.ilproj
deleted file mode 100644
index 5fa250452852..000000000000
--- a/src/tests/JIT/opt/Vectorization/BufferMemmoveTailCall.ilproj
+++ /dev/null
@@ -1,8 +0,0 @@
-<Project Sdk="Microsoft.NET.Sdk.IL">
- <PropertyGroup>
- <Optimize>True</Optimize>
- </PropertyGroup>
- <ItemGroup>
- <Compile Include="$(MSBuildProjectName).il" />
- </ItemGroup>
-</Project>
diff --git a/src/tests/baseservices/exceptions/unhandled/dependencytodelete.cs b/src/tests/baseservices/exceptions/unhandled/dependencytodelete.cs
new file mode 100644
index 000000000000..3897a8779df7
--- /dev/null
+++ b/src/tests/baseservices/exceptions/unhandled/dependencytodelete.cs
@@ -0,0 +1,12 @@
+using System;
+
+namespace Dependency
+{
+ public class DependencyClass
+ {
+ public static void Hello()
+ {
+ Console.WriteLine("Hello");
+ }
+ }
+}
diff --git a/src/tests/baseservices/exceptions/unhandled/dependencytodelete.csproj b/src/tests/baseservices/exceptions/unhandled/dependencytodelete.csproj
new file mode 100644
index 000000000000..fa1f2d01f80e
--- /dev/null
+++ b/src/tests/baseservices/exceptions/unhandled/dependencytodelete.csproj
@@ -0,0 +1,9 @@
+<Project Sdk="Microsoft.NET.Sdk">
+ <PropertyGroup>
+ <CLRTestKind>BuildOnly</CLRTestKind>
+ <OutputType>Library</OutputType>
+ </PropertyGroup>
+ <ItemGroup>
+ <Compile Include="dependencytodelete.cs" />
+ </ItemGroup>
+</Project>
diff --git a/src/tests/baseservices/exceptions/unhandled/unhandledTester.cs b/src/tests/baseservices/exceptions/unhandled/unhandledTester.cs
index 151c8b635166..2c30f593dfd9 100644
--- a/src/tests/baseservices/exceptions/unhandled/unhandledTester.cs
+++ b/src/tests/baseservices/exceptions/unhandled/unhandledTester.cs
@@ -14,14 +14,14 @@ namespace TestUnhandledExceptionTester
{
public class Program
{
- static void RunExternalProcess(string unhandledType)
+ static void RunExternalProcess(string unhandledType, string assembly)
{
List<string> lines = new List<string>();
Process testProcess = new Process();
testProcess.StartInfo.FileName = Path.Combine(Environment.GetEnvironmentVariable("CORE_ROOT"), "corerun");
- testProcess.StartInfo.Arguments = Path.Combine(Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location), "unhandled.dll") + " " + unhandledType;
+ testProcess.StartInfo.Arguments = Path.Combine(Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location), assembly) + " " + unhandledType;
testProcess.StartInfo.RedirectStandardError = true;
// Disable creating dump since the target process is expected to fail with an unhandled exception
testProcess.StartInfo.Environment.Remove("DOTNET_DbgEnableMiniDump");
@@ -116,8 +116,10 @@ namespace TestUnhandledExceptionTester
[Fact]
public static void TestEntryPoint()
{
- RunExternalProcess("main");
- RunExternalProcess("foreign");
+ RunExternalProcess("main", "unhandled.dll");
+ RunExternalProcess("foreign", "unhandled.dll");
+ File.Delete(Path.Combine(Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location), "dependencytodelete.dll"));
+ RunExternalProcess("missingdependency", "unhandledmissingdependency.dll");
}
}
}
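RunExternalProcess now takes the target assembly so the same harness can drive the new missing-dependency scenario: dependencytodelete.dll is built and deployed, then deleted before launching unhandledmissingdependency.dll, so resolving Dependency.DependencyClass fails and surfaces as an unhandled exception. A simplified sketch of the launch pattern (the real method also asserts on the captured stderr):

    using System;
    using System.Diagnostics;
    using System.IO;

    static int RunCoreRun(string assemblyPath, string scenario)
    {
        var psi = new ProcessStartInfo
        {
            FileName = Path.Combine(Environment.GetEnvironmentVariable("CORE_ROOT")!, "corerun"),
            Arguments = assemblyPath + " " + scenario,
            RedirectStandardError = true,
        };
        // The child is expected to die with an unhandled exception; suppress
        // minidump creation so the failure path stays fast and clean.
        psi.Environment.Remove("DOTNET_DbgEnableMiniDump");
        using Process proc = Process.Start(psi)!;
        string stderr = proc.StandardError.ReadToEnd();
        proc.WaitForExit();
        return proc.ExitCode;
    }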
diff --git a/src/tests/baseservices/exceptions/unhandled/unhandledTester.csproj b/src/tests/baseservices/exceptions/unhandled/unhandledTester.csproj
index 99e154e74cd6..fff804f1924b 100644
--- a/src/tests/baseservices/exceptions/unhandled/unhandledTester.csproj
+++ b/src/tests/baseservices/exceptions/unhandled/unhandledTester.csproj
@@ -17,5 +17,10 @@
<OutputItemType>Content</OutputItemType>
<CopyToOutputDirectory>Always</CopyToOutputDirectory>
</ProjectReference>
+ <ProjectReference Include="unhandledmissingdependency.csproj">
+ <ReferenceOutputAssembly>false</ReferenceOutputAssembly>
+ <OutputItemType>Content</OutputItemType>
+ <CopyToOutputDirectory>Always</CopyToOutputDirectory>
+ </ProjectReference>
</ItemGroup>
</Project>
diff --git a/src/tests/baseservices/exceptions/unhandled/unhandledmissingdependency.cs b/src/tests/baseservices/exceptions/unhandled/unhandledmissingdependency.cs
new file mode 100644
index 000000000000..4863cafae875
--- /dev/null
+++ b/src/tests/baseservices/exceptions/unhandled/unhandledmissingdependency.cs
@@ -0,0 +1,12 @@
+using Dependency;
+
+namespace DependencyTest
+{
+ internal class Program
+ {
+ static void Main(string[] args)
+ {
+ DependencyClass.Hello();
+ }
+ }
+}
diff --git a/src/tests/baseservices/exceptions/unhandled/unhandledmissingdependency.csproj b/src/tests/baseservices/exceptions/unhandled/unhandledmissingdependency.csproj
new file mode 100644
index 000000000000..d388263ca3bc
--- /dev/null
+++ b/src/tests/baseservices/exceptions/unhandled/unhandledmissingdependency.csproj
@@ -0,0 +1,19 @@
+<Project Sdk="Microsoft.NET.Sdk">
+ <PropertyGroup>
+ <!-- Needs explicit Main to return the proper "unhandled exception" exit code -->
+ <RequiresProcessIsolation>true</RequiresProcessIsolation>
+ <ReferenceXUnitWrapperGenerator>false</ReferenceXUnitWrapperGenerator>
+ <CLRTestKind>BuildOnly</CLRTestKind>
+ <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
+ <Optimize>true</Optimize>
+ </PropertyGroup>
+ <ItemGroup>
+ <Compile Include="unhandledmissingdependency.cs" />
+ </ItemGroup>
+ <ItemGroup>
+ <ProjectReference Include="dependencytodelete.csproj" />
+ </ItemGroup>
+ <ItemGroup>
+ <ProjectReference Include="$(TestSourceDir)Common/CoreCLRTestLibrary/CoreCLRTestLibrary.csproj" />
+ </ItemGroup>
+</Project>
diff --git a/src/tests/issues.targets b/src/tests/issues.targets
index 473e82bc3e96..9138cc7b845d 100644
--- a/src/tests/issues.targets
+++ b/src/tests/issues.targets
@@ -75,7 +75,7 @@
<ExcludeList Include="$(XunitTestBinBase)/readytorun/GenericCycleDetection/Depth3Test/*">
<Issue>https://github.com/dotnet/runtime/issues/88586</Issue>
</ExcludeList>
- <ExcludeList Include="$(XunitTestBinBase)/Interop/Swift/**">
+ <ExcludeList Include="$(XunitTestBinBase)/Interop/Swift/SwiftInvalidCallConv/*">
<Issue>https://github.com/dotnet/runtime/issues/93631</Issue>
</ExcludeList>
</ItemGroup>
@@ -1870,6 +1870,9 @@
<ExcludeList Include="$(XunitTestBinBase)/JIT/Regression/JitBlue/Runtime_90219/Runtime_90219/*">
<Issue>https://github.com/dotnet/runtime/issues/90374</Issue>
</ExcludeList>
+ <ExcludeList Include="$(XunitTestBinBase)/JIT/opt/Structs/MemsetMemcpyNullref/*">
+ <Issue>https://github.com/dotnet/runtime/issues/98628</Issue>
+ </ExcludeList>
</ItemGroup>
<!-- Known failures for mono runtime on Windows -->
diff --git a/src/tests/nativeaot/SmokeTests/Preinitialization/Preinitialization.cs b/src/tests/nativeaot/SmokeTests/Preinitialization/Preinitialization.cs
index d08e87bbaa76..33fffb9c09c6 100644
--- a/src/tests/nativeaot/SmokeTests/Preinitialization/Preinitialization.cs
+++ b/src/tests/nativeaot/SmokeTests/Preinitialization/Preinitialization.cs
@@ -404,7 +404,15 @@ class TestReferenceTypeAllocation
public static void Run()
{
- Assert.IsPreinitialized(typeof(TestReferenceTypeAllocation));
+ if (RuntimeInformation.ProcessArchitecture is Architecture.Arm or Architecture.Wasm)
+ {
+ // Because of the double field, this is not preinitialized
+ Assert.IsLazyInitialized(typeof(TestReferenceTypeAllocation));
+ }
+ else
+ {
+ Assert.IsPreinitialized(typeof(TestReferenceTypeAllocation));
+ }
Assert.AreEqual(12345, s_referenceType.IntValue);
Assert.AreEqual(3.14159, s_referenceType.DoubleValue);
}
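The reason, inferred from the comment above: the static under test holds an object with a double field, and on Arm and Wasm the compiler does not preinitialize such objects (plausibly because the 8-byte field alignment cannot be guaranteed in the frozen-object layout on those targets), so initialization falls back to a lazy runtime cctor. Assumed shape of the static being asserted on:

    class ReferenceType
    {
        public int IntValue = 12345;
        public double DoubleValue = 3.14159; // the 8-byte field in question
    }

    static class Holder
    {
        // On Arm/Wasm this stays lazily initialized (runtime cctor) instead
        // of being baked into the frozen data section at compile time.
        static readonly ReferenceType s_referenceType = new ReferenceType();
    }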
diff --git a/src/tests/nativeaot/SmokeTests/TrimmingBehaviors/DeadCodeElimination.cs b/src/tests/nativeaot/SmokeTests/TrimmingBehaviors/DeadCodeElimination.cs
index d54c801ccee3..bf4b8639cefa 100644
--- a/src/tests/nativeaot/SmokeTests/TrimmingBehaviors/DeadCodeElimination.cs
+++ b/src/tests/nativeaot/SmokeTests/TrimmingBehaviors/DeadCodeElimination.cs
@@ -340,6 +340,8 @@ class DeadCodeElimination
class TestTypeEquals
{
+ sealed class Gen<T> { }
+
sealed class Never { }
static Type s_type = null;
@@ -350,6 +352,9 @@ class DeadCodeElimination
// despite the typeof
Console.WriteLine(s_type == typeof(Never));
+ // This was a compiler crash
+ Console.WriteLine(typeof(object) == typeof(Gen<>));
+
#if !DEBUG
ThrowIfPresent(typeof(TestTypeEquals), nameof(Never));
#endif
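typeof(Gen<>) yields the open generic definition rather than a constructed type, and the compiler's type-equality folding previously crashed on that shape; the added line keeps the pattern around as a regression test. Minimal repro shape:

    using System;

    sealed class Gen<T> { }

    static class Repro
    {
        static void Probe()
        {
            // Comparing a concrete type against an open generic definition
            // should fold to a constant 'false', not crash the AOT compiler.
            Console.WriteLine(typeof(object) == typeof(Gen<>));
        }
    }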
diff --git a/src/tests/profiler/multiple/multiple.cs b/src/tests/profiler/multiple/multiple.cs
index aa0388fa0eb2..0d686ae0691a 100644
--- a/src/tests/profiler/multiple/multiple.cs
+++ b/src/tests/profiler/multiple/multiple.cs
@@ -35,9 +35,9 @@ namespace Profiler.Tests
}
Console.WriteLine("Waiting for profilers to all detach");
- if (!_profilerDone.WaitOne(TimeSpan.FromMinutes(5)))
+ if (!_profilerDone.WaitOne(TimeSpan.FromMinutes(10)))
{
- Console.WriteLine("Profiler did not set the callback, test will fail.");
+ throw new Exception("Test timed out waiting for the profilers to set the callback.");
}
return 100;
diff --git a/src/tools/illink/illink.sln b/src/tools/illink/illink.sln
index 87e99d208c7f..9b8d904942ba 100644
--- a/src/tools/illink/illink.sln
+++ b/src/tools/illink/illink.sln
@@ -223,6 +223,7 @@ Global
SolutionGuid = {E43A3901-42B0-48CA-BB36-5CD40A99A6EE}
EndGlobalSection
GlobalSection(SharedMSBuildProjectFiles) = preSolution
+ test\Trimming.Tests.Shared\Trimming.Tests.Shared.projitems*{400a1561-b6b6-482d-9e4c-3ddaede5bd07}*SharedItemsImports = 5
src\ILLink.Shared\ILLink.Shared.projitems*{dd28e2b1-057b-4b4d-a04d-b2ebd9e76e46}*SharedItemsImports = 5
src\ILLink.Shared\ILLink.Shared.projitems*{f1a44a78-34ee-408b-8285-9a26f0e7d4f2}*SharedItemsImports = 5
src\ILLink.Shared\ILLink.Shared.projitems*{ff598e93-8e9e-4091-9f50-61a7572663ae}*SharedItemsImports = 13
diff --git a/src/tools/illink/src/ILLink.RoslynAnalyzer/DataFlow/FeatureChecksValue.cs b/src/tools/illink/src/ILLink.RoslynAnalyzer/DataFlow/FeatureChecksValue.cs
index 268833431274..028628f2dd59 100644
--- a/src/tools/illink/src/ILLink.RoslynAnalyzer/DataFlow/FeatureChecksValue.cs
+++ b/src/tools/illink/src/ILLink.RoslynAnalyzer/DataFlow/FeatureChecksValue.cs
@@ -13,11 +13,15 @@ namespace ILLink.RoslynAnalyzer.DataFlow
// For now, this is only designed to track the built-in "features"/"capabilities"
// like RuntimeFeatures.IsDynamicCodeSupported, where a true return value
// indicates that a feature/capability is available.
- public record struct FeatureChecksValue : INegate<FeatureChecksValue>
+ public record struct FeatureChecksValue : INegate<FeatureChecksValue>, IDeepCopyValue<FeatureChecksValue>
{
public ValueSet<string> EnabledFeatures;
public ValueSet<string> DisabledFeatures;
+ public static readonly FeatureChecksValue All = new FeatureChecksValue (ValueSet<string>.Unknown, ValueSet<string>.Empty);
+
+ public static readonly FeatureChecksValue None = new FeatureChecksValue (ValueSet<string>.Empty, ValueSet<string>.Empty);
+
public FeatureChecksValue (string enabledFeature)
{
EnabledFeatures = new ValueSet<string> (enabledFeature);
@@ -48,5 +52,10 @@ namespace ILLink.RoslynAnalyzer.DataFlow
{
return new FeatureChecksValue (DisabledFeatures.DeepCopy (), EnabledFeatures.DeepCopy ());
}
+
+ public FeatureChecksValue DeepCopy ()
+ {
+ return new FeatureChecksValue (EnabledFeatures.DeepCopy (), DisabledFeatures.DeepCopy ());
+ }
}
}
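FeatureChecksValue is a two-set abstract value: the features known enabled and known disabled along a branch. The new constants give the lattice its extremes — None (both sets empty) is what unrecognized expressions produce, while All (enabled set Unknown, i.e. universal) is what a constant false produces, since code guarded by false may assume any feature. A semantics sketch, using the fully qualified Requires attribute name the analyzers now key on:

    var dynamicCode = new FeatureChecksValue (
        "System.Diagnostics.CodeAnalysis.RequiresDynamicCodeAttribute");

    // Negate swaps the sets: '!check' guards the branch where the feature is
    // known disabled rather than known enabled.
    FeatureChecksValue whenDisabled = dynamicCode.Negate ();

    // DeepCopy clones both underlying ValueSets so stored patterns don't
    // alias mutable dataflow state.
    FeatureChecksValue snapshot = dynamicCode.DeepCopy ();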
diff --git a/src/tools/illink/src/ILLink.RoslynAnalyzer/DataFlow/FeatureCheckVisitor.cs b/src/tools/illink/src/ILLink.RoslynAnalyzer/DataFlow/FeatureChecksVisitor.cs
index 707294718fda..7c0935eb05ea 100644
--- a/src/tools/illink/src/ILLink.RoslynAnalyzer/DataFlow/FeatureCheckVisitor.cs
+++ b/src/tools/illink/src/ILLink.RoslynAnalyzer/DataFlow/FeatureChecksVisitor.cs
@@ -24,7 +24,7 @@ namespace ILLink.RoslynAnalyzer.DataFlow
// (a set features that are checked to be enabled or disabled).
// The visitor takes a LocalDataFlowState as an argument, allowing for checks that
// depend on the current dataflow state.
- public class FeatureChecksVisitor : OperationVisitor<StateValue, FeatureChecksValue?>
+ public class FeatureChecksVisitor : OperationVisitor<StateValue, FeatureChecksValue>
{
DataFlowAnalyzerContext _dataFlowAnalyzerContext;
@@ -33,32 +33,48 @@ namespace ILLink.RoslynAnalyzer.DataFlow
_dataFlowAnalyzerContext = dataFlowAnalyzerContext;
}
- public override FeatureChecksValue? VisitArgument (IArgumentOperation operation, StateValue state)
+ public override FeatureChecksValue DefaultVisit (IOperation operation, StateValue state)
+ {
+ // Visiting a non-understood pattern should return the empty set of features, which will
+ // prevent this check from acting as a guard for any feature.
+ return FeatureChecksValue.None;
+ }
+
+ public override FeatureChecksValue VisitArgument (IArgumentOperation operation, StateValue state)
{
return Visit (operation.Value, state);
}
- public override FeatureChecksValue? VisitPropertyReference (IPropertyReferenceOperation operation, StateValue state)
+ public override FeatureChecksValue VisitPropertyReference (IPropertyReferenceOperation operation, StateValue state)
{
+ // A single property may serve as a feature check for multiple features.
+ FeatureChecksValue featureChecks = FeatureChecksValue.None;
foreach (var analyzer in _dataFlowAnalyzerContext.EnabledRequiresAnalyzers) {
- if (analyzer.IsRequiresCheck (_dataFlowAnalyzerContext.Compilation, operation.Property)) {
- return new FeatureChecksValue (analyzer.FeatureName);
+ if (analyzer.IsFeatureCheck (operation.Property, _dataFlowAnalyzerContext.Compilation)) {
+ var featureCheck = new FeatureChecksValue (analyzer.RequiresAttributeFullyQualifiedName);
+ featureChecks = featureChecks.And (featureCheck);
}
}
- return null;
+ return featureChecks;
}
- public override FeatureChecksValue? VisitUnaryOperator (IUnaryOperation operation, StateValue state)
+ public override FeatureChecksValue VisitUnaryOperator (IUnaryOperation operation, StateValue state)
{
if (operation.OperatorKind is not UnaryOperatorKind.Not)
- return null;
+ return FeatureChecksValue.None;
- FeatureChecksValue? context = Visit (operation.Operand, state);
- if (context == null)
- return null;
+ FeatureChecksValue context = Visit (operation.Operand, state);
+ return context.Negate ();
+ }
- return context.Value.Negate ();
+ public override FeatureChecksValue VisitLiteral (ILiteralOperation operation, StateValue state)
+ {
+ // 'false' can guard any feature
+ if (GetConstantBool (operation.ConstantValue) is false)
+ return FeatureChecksValue.All;
+
+ return FeatureChecksValue.None;
}
public bool? GetLiteralBool (IOperation operation)
@@ -77,7 +93,7 @@ namespace ILLink.RoslynAnalyzer.DataFlow
return value;
}
- public override FeatureChecksValue? VisitBinaryOperator (IBinaryOperation operation, StateValue state)
+ public override FeatureChecksValue VisitBinaryOperator (IBinaryOperation operation, StateValue state)
{
bool expectEqual;
switch (operation.OperatorKind) {
@@ -88,36 +104,32 @@ namespace ILLink.RoslynAnalyzer.DataFlow
expectEqual = false;
break;
default:
- return null;
+ return FeatureChecksValue.None;
}
if (GetLiteralBool (operation.LeftOperand) is bool leftBool) {
- if (Visit (operation.RightOperand, state) is not FeatureChecksValue rightValue)
- return null;
+ FeatureChecksValue rightValue = Visit (operation.RightOperand, state);
return leftBool == expectEqual
? rightValue
: rightValue.Negate ();
}
if (GetLiteralBool (operation.RightOperand) is bool rightBool) {
- if (Visit (operation.LeftOperand, state) is not FeatureChecksValue leftValue)
- return null;
+ FeatureChecksValue leftValue = Visit (operation.LeftOperand, state);
return rightBool == expectEqual
? leftValue
: leftValue.Negate ();
}
- return null;
+ return FeatureChecksValue.None;
}
- public override FeatureChecksValue? VisitIsPattern (IIsPatternOperation operation, StateValue state)
+ public override FeatureChecksValue VisitIsPattern (IIsPatternOperation operation, StateValue state)
{
if (GetExpectedValueFromPattern (operation.Pattern) is not bool patternValue)
- return null;
-
- if (Visit (operation.Value, state) is not FeatureChecksValue value)
- return null;
+ return FeatureChecksValue.None;
+ FeatureChecksValue value = Visit (operation.Value, state);
return patternValue
? value
: value.Negate ();
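With the visitor made total (FeatureChecksValue instead of FeatureChecksValue?), every operation yields a value: DefaultVisit returns None, so unrecognized shapes simply guard nothing rather than propagating null. The source patterns it models, sketched against the built-in check:

    using System.Runtime.CompilerServices;

    static void Patterns()
    {
        if (RuntimeFeature.IsDynamicCodeSupported)
        {
            // VisitPropertyReference: DynamicCode known enabled on this branch.
        }

        if (!RuntimeFeature.IsDynamicCodeSupported)
            return; // VisitUnaryOperator: negation of the property's value

        if (RuntimeFeature.IsDynamicCodeSupported == false)
            return; // VisitBinaryOperator: literal folded against the check

        if (false)
        {
            // VisitLiteral: a constant 'false' yields All — dead code may
            // assume every feature.
        }
    }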
diff --git a/src/tools/illink/src/ILLink.RoslynAnalyzer/DataFlow/LocalDataFlowVisitor.cs b/src/tools/illink/src/ILLink.RoslynAnalyzer/DataFlow/LocalDataFlowVisitor.cs
index bbaeff53f0c5..dc2345b56463 100644
--- a/src/tools/illink/src/ILLink.RoslynAnalyzer/DataFlow/LocalDataFlowVisitor.cs
+++ b/src/tools/illink/src/ILLink.RoslynAnalyzer/DataFlow/LocalDataFlowVisitor.cs
@@ -88,12 +88,12 @@ namespace ILLink.RoslynAnalyzer.DataFlow
return null;
var branchValue = Visit (branchValueOperation, state);
-
+ TConditionValue conditionValue = GetConditionValue (branchValueOperation, state);
if (block.Block.ConditionKind != ControlFlowConditionKind.None) {
// BranchValue may represent a value used in a conditional branch to the ConditionalSuccessor.
// If so, give the analysis an opportunity to model the checked condition, and return the model
// of the condition back to the generic analysis. It will be applied to the state of each outgoing branch.
- return GetConditionValue (branchValueOperation, state);
+ return conditionValue;
}
// If not, the BranchValue represents a return or throw value associated with the FallThroughSuccessor of this block.
@@ -118,10 +118,13 @@ namespace ILLink.RoslynAnalyzer.DataFlow
// We don't want the return operation because this might have multiple possible return values in general.
var current = state.Current;
HandleReturnValue (branchValue, branchValueOperation, in current.Context);
+ // Must be called for every return value even if it did not return an understood condition,
+ // because the non-understood conditions will produce warnings for FeatureCheck properties.
+ HandleReturnConditionValue (conditionValue, branchValueOperation);
return null;
}
- public abstract TConditionValue? GetConditionValue (
+ public abstract TConditionValue GetConditionValue (
IOperation branchValueOperation,
LocalDataFlowState<TValue, TContext, TValueLattice, TContextLattice> state);
@@ -146,6 +149,10 @@ namespace ILLink.RoslynAnalyzer.DataFlow
IOperation operation,
in TContext context);
+ public abstract void HandleReturnConditionValue (
+ TConditionValue returnConditionValue,
+ IOperation branchValueOperation);
+
// This is called for any method call, which includes:
// - Normal invocation operation
// - Accessing property value - which is treated as a call to the getter
@@ -776,9 +783,7 @@ namespace ILLink.RoslynAnalyzer.DataFlow
// Get the condition value that is being asserted. If the attribute is DoesNotReturnIf(true),
// the condition value needs to be negated so that we can assert the false condition.
- if (GetConditionValue (argumentOperation, state) is not TConditionValue conditionValue)
- continue;
-
+ TConditionValue conditionValue = GetConditionValue (argumentOperation, state);
var current = state.Current;
ApplyCondition (
doesNotReturnIfConditionValue == false
diff --git a/src/tools/illink/src/ILLink.RoslynAnalyzer/DataflowAnalyzerContext.cs b/src/tools/illink/src/ILLink.RoslynAnalyzer/DataFlowAnalyzerContext.cs
index 406992551a60..406992551a60 100644
--- a/src/tools/illink/src/ILLink.RoslynAnalyzer/DataflowAnalyzerContext.cs
+++ b/src/tools/illink/src/ILLink.RoslynAnalyzer/DataFlowAnalyzerContext.cs
diff --git a/src/tools/illink/src/ILLink.RoslynAnalyzer/DynamicallyAccessedMembersAnalyzer.cs b/src/tools/illink/src/ILLink.RoslynAnalyzer/DynamicallyAccessedMembersAnalyzer.cs
index 173bb667a4d1..8c506606c292 100644
--- a/src/tools/illink/src/ILLink.RoslynAnalyzer/DynamicallyAccessedMembersAnalyzer.cs
+++ b/src/tools/illink/src/ILLink.RoslynAnalyzer/DynamicallyAccessedMembersAnalyzer.cs
@@ -21,6 +21,8 @@ namespace ILLink.RoslynAnalyzer
internal const string DynamicallyAccessedMembersAttribute = nameof (DynamicallyAccessedMembersAttribute);
public const string attributeArgument = "attributeArgument";
public const string FullyQualifiedDynamicallyAccessedMembersAttribute = "System.Diagnostics.CodeAnalysis." + DynamicallyAccessedMembersAttribute;
+ public const string FullyQualifiedFeatureCheckAttribute = "System.Diagnostics.CodeAnalysis.FeatureCheckAttribute";
+ public const string FullyQualifiedFeatureDependsOnAttribute = "System.Diagnostics.CodeAnalysis.FeatureDependsOnAttribute";
public static Lazy<ImmutableArray<RequiresAnalyzerBase>> RequiresAnalyzers { get; } = new Lazy<ImmutableArray<RequiresAnalyzerBase>> (GetRequiresAnalyzers);
static ImmutableArray<RequiresAnalyzerBase> GetRequiresAnalyzers () =>
ImmutableArray.Create<RequiresAnalyzerBase> (
@@ -51,6 +53,8 @@ namespace ILLink.RoslynAnalyzer
diagDescriptorsArrayBuilder.Add (DiagnosticDescriptors.GetDiagnosticDescriptor (DiagnosticId.UnrecognizedTypeNameInTypeGetType));
diagDescriptorsArrayBuilder.Add (DiagnosticDescriptors.GetDiagnosticDescriptor (DiagnosticId.UnrecognizedParameterInMethodCreateInstance));
diagDescriptorsArrayBuilder.Add (DiagnosticDescriptors.GetDiagnosticDescriptor (DiagnosticId.ParametersOfAssemblyCreateInstanceCannotBeAnalyzed));
+ diagDescriptorsArrayBuilder.Add (DiagnosticDescriptors.GetDiagnosticDescriptor (DiagnosticId.ReturnValueDoesNotMatchFeatureChecks));
+ diagDescriptorsArrayBuilder.Add (DiagnosticDescriptors.GetDiagnosticDescriptor (DiagnosticId.InvalidFeatureCheck));
foreach (var requiresAnalyzer in RequiresAnalyzers.Value) {
foreach (var diagnosticDescriptor in requiresAnalyzer.SupportedDiagnostics)
diff --git a/src/tools/illink/src/ILLink.RoslynAnalyzer/ILLink.RoslynAnalyzer.csproj b/src/tools/illink/src/ILLink.RoslynAnalyzer/ILLink.RoslynAnalyzer.csproj
index 7db05c1cde9a..bc410523d5d7 100644
--- a/src/tools/illink/src/ILLink.RoslynAnalyzer/ILLink.RoslynAnalyzer.csproj
+++ b/src/tools/illink/src/ILLink.RoslynAnalyzer/ILLink.RoslynAnalyzer.csproj
@@ -1,4 +1,4 @@
-<Project Sdk="Microsoft.NET.Sdk">
+<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>netstandard2.0</TargetFramework>
@@ -9,6 +9,12 @@
<AnalysisLevel>Latest</AnalysisLevel>
<NoWarn Condition="'$(DotNetBuildSourceOnly)' == 'true'">$(NoWarn);CS8524</NoWarn>
<AnalyzerLanguage>cs</AnalyzerLanguage>
+ <!-- The analyzer needs to process deeply nested expressions in corelib.
+ This can blow up the stack when running unoptimized code (due to large
+ stack frames with many temporary locals for debugging support), so we
+ optimize the analyzer even in Debug builds. Note: we still use the
+ Debug configuration to get Debug asserts. -->
+ <Optimize>true</Optimize>
</PropertyGroup>
<ItemGroup>
diff --git a/src/tools/illink/src/ILLink.RoslynAnalyzer/ISymbolExtensions.cs b/src/tools/illink/src/ILLink.RoslynAnalyzer/ISymbolExtensions.cs
index 7e830f7c6ecd..42ad2f9c9ae1 100644
--- a/src/tools/illink/src/ILLink.RoslynAnalyzer/ISymbolExtensions.cs
+++ b/src/tools/illink/src/ILLink.RoslynAnalyzer/ISymbolExtensions.cs
@@ -1,9 +1,14 @@
// Copyright (c) .NET Foundation and contributors. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+using System.Collections.Immutable;
+using System.Collections.Generic;
using System.Diagnostics.CodeAnalysis;
+using System.Linq;
using System.Text;
using Microsoft.CodeAnalysis;
+using ILLink.RoslynAnalyzer.DataFlow;
+using ILLink.Shared.DataFlow;
namespace ILLink.RoslynAnalyzer
{
@@ -34,6 +39,14 @@ namespace ILLink.RoslynAnalyzer
return false;
}
+ internal static IEnumerable<AttributeData> GetAttributes (this ISymbol member, string attributeName)
+ {
+ foreach (var attr in member.GetAttributes ()) {
+ if (attr.AttributeClass is { } attrClass && attrClass.HasName (attributeName))
+ yield return attr;
+ }
+ }
+
internal static DynamicallyAccessedMemberTypes GetDynamicallyAccessedMemberTypes (this ISymbol symbol)
{
if (!TryGetAttribute (symbol, DynamicallyAccessedMembersAnalyzer.DynamicallyAccessedMembersAttribute, out var dynamicallyAccessedMembers))
@@ -58,6 +71,28 @@ namespace ILLink.RoslynAnalyzer
return (DynamicallyAccessedMemberTypes) dynamicallyAccessedMembers.ConstructorArguments[0].Value!;
}
+ internal static ValueSet<string> GetFeatureCheckAnnotations (this IPropertySymbol propertySymbol)
+ {
+ HashSet<string> featureSet = new ();
+ foreach (var attributeData in propertySymbol.GetAttributes (DynamicallyAccessedMembersAnalyzer.FullyQualifiedFeatureCheckAttribute)) {
+ if (attributeData.ConstructorArguments is [TypedConstant { Value: INamedTypeSymbol featureType }])
+ AddFeatures (featureType);
+ }
+ return featureSet.Count == 0 ? ValueSet<string>.Empty : new ValueSet<string> (featureSet);
+
+ void AddFeatures (INamedTypeSymbol featureType) {
+ var featureName = featureType.GetDisplayName ();
+ if (!featureSet.Add (featureName))
+ return;
+
+ // Look at FeatureDependsOn attributes on the feature type.
+ foreach (var featureTypeAttributeData in featureType.GetAttributes (DynamicallyAccessedMembersAnalyzer.FullyQualifiedFeatureDependsOnAttribute)) {
+ if (featureTypeAttributeData.ConstructorArguments is [TypedConstant { Value: INamedTypeSymbol featureTypeSymbol }])
+ AddFeatures (featureTypeSymbol);
+ }
+ }
+ }
+
internal static bool TryGetReturnAttribute (this IMethodSymbol member, string attributeName, [NotNullWhen (returnValue: true)] out AttributeData? attribute)
{
attribute = null;
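GetFeatureCheckAnnotations computes a transitive closure: a property annotated as a check for feature type A also counts as a check for every feature that A's type declares via FeatureDependsOn. A shape sketch — the attribute definitions are assumptions; only their fully qualified names (System.Diagnostics.CodeAnalysis.FeatureCheckAttribute and FeatureDependsOnAttribute) appear in the analyzer:

    // Hypothetical feature types; in practice the analyzers key on the
    // Requires* attributes' fully qualified names.
    class FeatureA { }

    [FeatureDependsOn(typeof(FeatureA))]
    class FeatureB { }

    class Guard
    {
        // GetFeatureCheckAnnotations returns { FeatureB, FeatureA }:
        // FeatureB directly, FeatureA through FeatureDependsOn.
        [FeatureCheck(typeof(FeatureB))]
        public static bool IsSupported => false;
    }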
diff --git a/src/tools/illink/src/ILLink.RoslynAnalyzer/RequiresAnalyzerBase.cs b/src/tools/illink/src/ILLink.RoslynAnalyzer/RequiresAnalyzerBase.cs
index d951404845cd..5b4c820c4c6d 100644
--- a/src/tools/illink/src/ILLink.RoslynAnalyzer/RequiresAnalyzerBase.cs
+++ b/src/tools/illink/src/ILLink.RoslynAnalyzer/RequiresAnalyzerBase.cs
@@ -5,8 +5,9 @@ using System;
using System.Collections.Immutable;
using System.Diagnostics.CodeAnalysis;
using System.Linq;
-using ILLink.Shared;
using ILLink.RoslynAnalyzer.DataFlow;
+using ILLink.Shared;
+using ILLink.Shared.DataFlow;
using Microsoft.CodeAnalysis;
using Microsoft.CodeAnalysis.CSharp;
using Microsoft.CodeAnalysis.CSharp.Syntax;
@@ -19,9 +20,7 @@ namespace ILLink.RoslynAnalyzer
{
private protected abstract string RequiresAttributeName { get; }
- internal abstract string FeatureName { get; }
-
- private protected abstract string RequiresAttributeFullyQualifiedName { get; }
+ internal abstract string RequiresAttributeFullyQualifiedName { get; }
private protected abstract DiagnosticTargets AnalyzerDiagnosticTargets { get; }
@@ -301,7 +300,23 @@ namespace ILLink.RoslynAnalyzer
// - false return value indicating that a feature is supported
// - feature settings supplied by the project
// - custom feature checks defined in library code
- internal virtual bool IsRequiresCheck (Compilation compilation, IPropertySymbol propertySymbol) => false;
+ private protected virtual bool IsRequiresCheck (IPropertySymbol propertySymbol, Compilation compilation) => false;
+
+ internal static bool IsAnnotatedFeatureCheck (IPropertySymbol propertySymbol, string featureName)
+ {
+ // Only respect FeatureCheckAttribute on static boolean properties.
+ if (!propertySymbol.IsStatic || propertySymbol.Type.SpecialType != SpecialType.System_Boolean)
+ return false;
+
+ ValueSet<string> featureCheckAnnotations = propertySymbol.GetFeatureCheckAnnotations ();
+ return featureCheckAnnotations.Contains (featureName);
+ }
+
+ internal bool IsFeatureCheck (IPropertySymbol propertySymbol, Compilation compilation)
+ {
+ return IsAnnotatedFeatureCheck (propertySymbol, RequiresAttributeFullyQualifiedName)
+ || IsRequiresCheck (propertySymbol, compilation);
+ }
internal bool CheckAndCreateRequiresDiagnostic (
IOperation operation,
@@ -312,7 +327,7 @@ namespace ILLink.RoslynAnalyzer
[NotNullWhen (true)] out Diagnostic? diagnostic)
{
// Warnings are not emitted if the featureContext says the feature is available.
- if (featureContext.IsEnabled (FeatureName)) {
+ if (featureContext.IsEnabled (RequiresAttributeFullyQualifiedName)) {
diagnostic = null;
return false;
}
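Two changes land here: the ad-hoc per-analyzer FeatureName strings ("DynamicCode", "AssemblyFiles", "UnreferencedCode") are replaced by the Requires attribute's fully qualified name as the feature key, and eligibility is tightened so that only static boolean properties can act as feature checks. An eligibility sketch:

    using System.Runtime.CompilerServices;

    static class Guards
    {
        // Eligible: static boolean property.
        public static bool SupportsJit => RuntimeFeature.IsDynamicCodeSupported;
    }

    class NotAGuard
    {
        // Not eligible: instance property. With a FeatureCheckAttribute on it,
        // the analyzer reports InvalidFeatureCheck (4001) instead of treating
        // it as a guard.
        public bool SupportsJit => RuntimeFeature.IsDynamicCodeSupported;
    }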
diff --git a/src/tools/illink/src/ILLink.RoslynAnalyzer/RequiresAssemblyFilesAnalyzer.cs b/src/tools/illink/src/ILLink.RoslynAnalyzer/RequiresAssemblyFilesAnalyzer.cs
index e8807896d937..8949b249b35e 100644
--- a/src/tools/illink/src/ILLink.RoslynAnalyzer/RequiresAssemblyFilesAnalyzer.cs
+++ b/src/tools/illink/src/ILLink.RoslynAnalyzer/RequiresAssemblyFilesAnalyzer.cs
@@ -36,9 +36,7 @@ namespace ILLink.RoslynAnalyzer
private protected override string RequiresAttributeName => RequiresAssemblyFilesAttribute;
- internal override string FeatureName => "AssemblyFiles";
-
- private protected override string RequiresAttributeFullyQualifiedName => RequiresAssemblyFilesAttributeFullyQualifiedName;
+ internal override string RequiresAttributeFullyQualifiedName => RequiresAssemblyFilesAttributeFullyQualifiedName;
private protected override DiagnosticTargets AnalyzerDiagnosticTargets => DiagnosticTargets.MethodOrConstructor | DiagnosticTargets.Property | DiagnosticTargets.Event;
@@ -61,7 +59,7 @@ namespace ILLink.RoslynAnalyzer
return true;
}
- internal override bool IsRequiresCheck (Compilation compilation, IPropertySymbol propertySymbol)
+ private protected override bool IsRequiresCheck (IPropertySymbol propertySymbol, Compilation compilation)
{
// "IsAssemblyFilesSupported" is treated as a requires check for testing purposes only, and
// is not officially-supported product behavior.
diff --git a/src/tools/illink/src/ILLink.RoslynAnalyzer/RequiresDynamicCodeAnalyzer.cs b/src/tools/illink/src/ILLink.RoslynAnalyzer/RequiresDynamicCodeAnalyzer.cs
index 5232ca9a9854..34bb7808d203 100644
--- a/src/tools/illink/src/ILLink.RoslynAnalyzer/RequiresDynamicCodeAnalyzer.cs
+++ b/src/tools/illink/src/ILLink.RoslynAnalyzer/RequiresDynamicCodeAnalyzer.cs
@@ -25,9 +25,7 @@ namespace ILLink.RoslynAnalyzer
private protected override string RequiresAttributeName => RequiresDynamicCodeAttribute;
- internal override string FeatureName => "DynamicCode";
-
- private protected override string RequiresAttributeFullyQualifiedName => FullyQualifiedRequiresDynamicCodeAttribute;
+ internal override string RequiresAttributeFullyQualifiedName => FullyQualifiedRequiresDynamicCodeAttribute;
private protected override DiagnosticTargets AnalyzerDiagnosticTargets => DiagnosticTargets.MethodOrConstructor | DiagnosticTargets.Class;
@@ -40,7 +38,7 @@ namespace ILLink.RoslynAnalyzer
internal override bool IsAnalyzerEnabled (AnalyzerOptions options) =>
options.IsMSBuildPropertyValueTrue (MSBuildPropertyOptionNames.EnableAotAnalyzer);
- internal override bool IsRequiresCheck (Compilation compilation, IPropertySymbol propertySymbol) {
+ private protected override bool IsRequiresCheck (IPropertySymbol propertySymbol, Compilation compilation) {
var runtimeFeaturesType = compilation.GetTypeByMetadataName ("System.Runtime.CompilerServices.RuntimeFeature");
if (runtimeFeaturesType == null)
return false;
diff --git a/src/tools/illink/src/ILLink.RoslynAnalyzer/RequiresUnreferencedCodeAnalyzer.cs b/src/tools/illink/src/ILLink.RoslynAnalyzer/RequiresUnreferencedCodeAnalyzer.cs
index 3623150b7520..69c38629c43e 100644
--- a/src/tools/illink/src/ILLink.RoslynAnalyzer/RequiresUnreferencedCodeAnalyzer.cs
+++ b/src/tools/illink/src/ILLink.RoslynAnalyzer/RequiresUnreferencedCodeAnalyzer.cs
@@ -49,11 +49,7 @@ namespace ILLink.RoslynAnalyzer
private protected override string RequiresAttributeName => RequiresUnreferencedCodeAttribute;
- public const string UnreferencedCode = nameof (UnreferencedCode);
-
- internal override string FeatureName => UnreferencedCode;
-
- private protected override string RequiresAttributeFullyQualifiedName => FullyQualifiedRequiresUnreferencedCodeAttribute;
+ internal override string RequiresAttributeFullyQualifiedName => FullyQualifiedRequiresUnreferencedCodeAttribute;
private protected override DiagnosticTargets AnalyzerDiagnosticTargets => DiagnosticTargets.MethodOrConstructor | DiagnosticTargets.Class;
@@ -66,7 +62,7 @@ namespace ILLink.RoslynAnalyzer
internal override bool IsAnalyzerEnabled (AnalyzerOptions options) =>
options.IsMSBuildPropertyValueTrue (MSBuildPropertyOptionNames.EnableTrimAnalyzer);
- internal override bool IsRequiresCheck (Compilation compilation, IPropertySymbol propertySymbol)
+ private protected override bool IsRequiresCheck (IPropertySymbol propertySymbol, Compilation compilation)
{
// "IsUnreferencedCodeSupported" is treated as a requires check for testing purposes only, and
// is not officially-supported product behavior.
diff --git a/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/FeatureCheckReturnValuePattern.cs b/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/FeatureCheckReturnValuePattern.cs
new file mode 100644
index 000000000000..b46ef52e1f3a
--- /dev/null
+++ b/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/FeatureCheckReturnValuePattern.cs
@@ -0,0 +1,70 @@
+// Copyright (c) .NET Foundation and contributors. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
+using System.Collections.Generic;
+using ILLink.Shared;
+using ILLink.Shared.DataFlow;
+using ILLink.Shared.TrimAnalysis;
+using ILLink.RoslynAnalyzer.DataFlow;
+using Microsoft.CodeAnalysis;
+
+namespace ILLink.RoslynAnalyzer.TrimAnalysis
+{
+ public readonly record struct FeatureCheckReturnValuePattern
+ {
+ public FeatureChecksValue ReturnValue { get; init; }
+ public ValueSet<string> FeatureCheckAnnotations { get; init; }
+ public IOperation Operation { get; init; }
+ public IPropertySymbol OwningSymbol { get; init; }
+
+ public FeatureCheckReturnValuePattern (
+ FeatureChecksValue returnValue,
+ ValueSet<string> featureCheckAnnotations,
+ IOperation operation,
+ IPropertySymbol owningSymbol)
+ {
+ ReturnValue = returnValue.DeepCopy ();
+ FeatureCheckAnnotations = featureCheckAnnotations.DeepCopy ();
+ Operation = operation;
+ OwningSymbol = owningSymbol;
+ }
+
+ public IEnumerable<Diagnostic> CollectDiagnostics (DataFlowAnalyzerContext context)
+ {
+ var diagnosticContext = new DiagnosticContext (Operation.Syntax.GetLocation ());
+ // For now, feature check validation is enabled only when trim analysis is enabled.
+ if (!context.EnableTrimAnalyzer)
+ return diagnosticContext.Diagnostics;
+
+ if (!OwningSymbol.IsStatic || OwningSymbol.Type.SpecialType != SpecialType.System_Boolean) {
+ // Warn about invalid feature checks (non-static or non-bool properties)
+ diagnosticContext.AddDiagnostic (
+ DiagnosticId.InvalidFeatureCheck);
+ return diagnosticContext.Diagnostics;
+ }
+
+ if (ReturnValue == FeatureChecksValue.All)
+ return diagnosticContext.Diagnostics;
+
+ ValueSet<string> returnValueFeatures = ReturnValue.EnabledFeatures;
+ // For any analyzer-supported feature that this property is declared to guard,
+ // the abstract return value must include that feature
+ // (indicating it is known to be enabled when the return value is true).
+ foreach (string feature in FeatureCheckAnnotations.GetKnownValues ()) {
+ foreach (var analyzer in context.EnabledRequiresAnalyzers) {
+ if (feature != analyzer.RequiresAttributeFullyQualifiedName)
+ continue;
+
+ if (!returnValueFeatures.Contains (feature)) {
+ diagnosticContext.AddDiagnostic (
+ DiagnosticId.ReturnValueDoesNotMatchFeatureChecks,
+ OwningSymbol.GetDisplayName (),
+ feature);
+ }
+ }
+ }
+
+ return diagnosticContext.Diagnostics;
+ }
+ }
+}
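FeatureCheckReturnValuePattern validates the declaration against the dataflow result: for each analyzer-known feature the property claims to check, every return value's abstract condition must include that feature, otherwise ReturnValueDoesNotMatchFeatureChecks (4000) fires; non-static or non-bool properties get InvalidFeatureCheck (4001). A sketch of a failing and a passing check (FeatureCheckAttribute's shape assumed as before):

    using System.Diagnostics.CodeAnalysis;
    using System.Runtime.CompilerServices;

    class Checks
    {
        // Warns (4000): declared to guard RequiresDynamicCode, but 'true'
        // carries no information about the feature.
        [FeatureCheck(typeof(RequiresDynamicCodeAttribute))]
        public static bool IsReady => true;

        // OK: the returned condition is the recognized feature check itself.
        [FeatureCheck(typeof(RequiresDynamicCodeAttribute))]
        public static bool IsReadyToo => RuntimeFeature.IsDynamicCodeSupported;
    }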
diff --git a/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisAssignmentPattern.cs b/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisAssignmentPattern.cs
index 5dfc31db3486..2ffcbc43ae88 100644
--- a/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisAssignmentPattern.cs
+++ b/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisAssignmentPattern.cs
@@ -58,7 +58,7 @@ namespace ILLink.RoslynAnalyzer.TrimAnalysis
var diagnosticContext = new DiagnosticContext (Operation.Syntax.GetLocation ());
if (context.EnableTrimAnalyzer &&
!OwningSymbol.IsInRequiresUnreferencedCodeAttributeScope (out _) &&
- !FeatureContext.IsEnabled (RequiresUnreferencedCodeAnalyzer.UnreferencedCode)) {
+ !FeatureContext.IsEnabled (RequiresUnreferencedCodeAnalyzer.FullyQualifiedRequiresUnreferencedCodeAttribute)) {
foreach (var sourceValue in Source.AsEnumerable ()) {
foreach (var targetValue in Target.AsEnumerable ()) {
// The target should always be an annotated value, but the visitor design currently prevents
diff --git a/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisGenericInstantiationPattern.cs b/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisGenericInstantiationPattern.cs
index 8d484e66036b..26f275085fa8 100644
--- a/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisGenericInstantiationPattern.cs
+++ b/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisGenericInstantiationPattern.cs
@@ -48,7 +48,7 @@ namespace ILLink.RoslynAnalyzer.TrimAnalysis
DiagnosticContext diagnosticContext = new (Operation.Syntax.GetLocation ());
if (context.EnableTrimAnalyzer &&
!OwningSymbol.IsInRequiresUnreferencedCodeAttributeScope (out _) &&
- !FeatureContext.IsEnabled (RequiresUnreferencedCodeAnalyzer.UnreferencedCode)) {
+ !FeatureContext.IsEnabled (RequiresUnreferencedCodeAnalyzer.FullyQualifiedRequiresUnreferencedCodeAttribute)) {
switch (GenericInstantiation) {
case INamedTypeSymbol type:
GenericArgumentDataFlow.ProcessGenericArgumentDataFlow (diagnosticContext, type);
diff --git a/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisMethodCallPattern.cs b/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisMethodCallPattern.cs
index 8341afa2ea5f..3dfd7fa28552 100644
--- a/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisMethodCallPattern.cs
+++ b/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisMethodCallPattern.cs
@@ -77,7 +77,7 @@ namespace ILLink.RoslynAnalyzer.TrimAnalysis
DiagnosticContext diagnosticContext = new (Operation.Syntax.GetLocation ());
if (context.EnableTrimAnalyzer &&
!OwningSymbol.IsInRequiresUnreferencedCodeAttributeScope(out _) &&
- !FeatureContext.IsEnabled (RequiresUnreferencedCodeAnalyzer.UnreferencedCode))
+ !FeatureContext.IsEnabled (RequiresUnreferencedCodeAnalyzer.FullyQualifiedRequiresUnreferencedCodeAttribute))
{
TrimAnalysisVisitor.HandleCall(Operation, OwningSymbol, CalledMethod, Instance, Arguments, diagnosticContext, default, out var _);
}
diff --git a/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisPatternStore.cs b/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisPatternStore.cs
index 78de8fdf4235..dd66d802934b 100644
--- a/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisPatternStore.cs
+++ b/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisPatternStore.cs
@@ -17,16 +17,20 @@ namespace ILLink.RoslynAnalyzer.TrimAnalysis
readonly Dictionary<IOperation, TrimAnalysisGenericInstantiationPattern> GenericInstantiationPatterns;
readonly Dictionary<IOperation, TrimAnalysisMethodCallPattern> MethodCallPatterns;
readonly Dictionary<IOperation, TrimAnalysisReflectionAccessPattern> ReflectionAccessPatterns;
+ readonly Dictionary<IOperation, FeatureCheckReturnValuePattern> FeatureCheckReturnValuePatterns;
readonly ValueSetLattice<SingleValue> Lattice;
readonly FeatureContextLattice FeatureContextLattice;
- public TrimAnalysisPatternStore (ValueSetLattice<SingleValue> lattice, FeatureContextLattice featureContextLattice)
+ public TrimAnalysisPatternStore (
+ ValueSetLattice<SingleValue> lattice,
+ FeatureContextLattice featureContextLattice)
{
AssignmentPatterns = new Dictionary<(IOperation, bool), TrimAnalysisAssignmentPattern> ();
FieldAccessPatterns = new Dictionary<IOperation, TrimAnalysisFieldAccessPattern> ();
GenericInstantiationPatterns = new Dictionary<IOperation, TrimAnalysisGenericInstantiationPattern> ();
MethodCallPatterns = new Dictionary<IOperation, TrimAnalysisMethodCallPattern> ();
ReflectionAccessPatterns = new Dictionary<IOperation, TrimAnalysisReflectionAccessPattern> ();
+ FeatureCheckReturnValuePatterns = new Dictionary<IOperation, FeatureCheckReturnValuePattern> ();
Lattice = lattice;
FeatureContextLattice = featureContextLattice;
}
@@ -89,6 +93,16 @@ namespace ILLink.RoslynAnalyzer.TrimAnalysis
ReflectionAccessPatterns[pattern.Operation] = pattern.Merge (Lattice, FeatureContextLattice, existingPattern);
}
+ public void Add (FeatureCheckReturnValuePattern pattern)
+ {
+ if (!FeatureCheckReturnValuePatterns.TryGetValue (pattern.Operation, out var existingPattern)) {
+ FeatureCheckReturnValuePatterns.Add (pattern.Operation, pattern);
+ return;
+ }
+
+ Debug.Assert (existingPattern == pattern, "Return values should be identical");
+ }
+
public IEnumerable<Diagnostic> CollectDiagnostics (DataFlowAnalyzerContext context)
{
foreach (var assignmentPattern in AssignmentPatterns.Values) {
@@ -115,6 +129,11 @@ namespace ILLink.RoslynAnalyzer.TrimAnalysis
foreach (var diagnostic in reflectionAccessPattern.CollectDiagnostics (context))
yield return diagnostic;
}
+
+ foreach (var returnValuePattern in FeatureCheckReturnValuePatterns.Values) {
+ foreach (var diagnostic in returnValuePattern.CollectDiagnostics (context))
+ yield return diagnostic;
+ }
}
}
}
diff --git a/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisReflectionAccessPattern.cs b/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisReflectionAccessPattern.cs
index 0e4c45a9f011..85897420596f 100644
--- a/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisReflectionAccessPattern.cs
+++ b/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisReflectionAccessPattern.cs
@@ -50,7 +50,7 @@ namespace ILLink.RoslynAnalyzer.TrimAnalysis
DiagnosticContext diagnosticContext = new (Operation.Syntax.GetLocation ());
if (context.EnableTrimAnalyzer &&
!OwningSymbol.IsInRequiresUnreferencedCodeAttributeScope (out _) &&
- !FeatureContext.IsEnabled (RequiresUnreferencedCodeAnalyzer.UnreferencedCode)) {
+ !FeatureContext.IsEnabled (RequiresUnreferencedCodeAnalyzer.FullyQualifiedRequiresUnreferencedCodeAttribute)) {
foreach (var diagnostic in ReflectionAccessAnalyzer.GetDiagnosticsForReflectionAccessToDAMOnMethod (diagnosticContext, ReferencedMethod))
diagnosticContext.AddDiagnostic (diagnostic);
}
diff --git a/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisVisitor.cs b/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisVisitor.cs
index 6f118ac8c479..04efbb63ff5b 100644
--- a/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisVisitor.cs
+++ b/src/tools/illink/src/ILLink.RoslynAnalyzer/TrimAnalysis/TrimAnalysisVisitor.cs
@@ -43,6 +43,8 @@ namespace ILLink.RoslynAnalyzer.TrimAnalysis
FeatureChecksVisitor _featureChecksVisitor;
+ DataFlowAnalyzerContext _dataFlowAnalyzerContext;
+
public TrimAnalysisVisitor (
Compilation compilation,
LocalStateAndContextLattice<MultiValue, FeatureContext, ValueSetLattice<SingleValue>, FeatureContextLattice> lattice,
@@ -57,14 +59,15 @@ namespace ILLink.RoslynAnalyzer.TrimAnalysis
_multiValueLattice = lattice.LocalStateLattice.Lattice.ValueLattice;
TrimAnalysisPatterns = trimAnalysisPatterns;
_featureChecksVisitor = new FeatureChecksVisitor (dataFlowAnalyzerContext);
+ _dataFlowAnalyzerContext = dataFlowAnalyzerContext;
}
- public override FeatureChecksValue? GetConditionValue (IOperation branchValueOperation, StateValue state)
+ public override FeatureChecksValue GetConditionValue (IOperation branchValueOperation, StateValue state)
{
return _featureChecksVisitor.Visit (branchValueOperation, state);
}
- public override void ApplyCondition (FeatureChecksValue featureChecksValue, ref LocalStateAndContext<MultiValue, FeatureContext> currentState)
+ public override void ApplyCondition (FeatureChecksValue featureChecksValue, ref LocalStateAndContext<MultiValue, FeatureContext> currentState)
{
currentState.Context = currentState.Context.Union (new FeatureContext (featureChecksValue.EnabledFeatures));
}
@@ -426,6 +429,28 @@ namespace ILLink.RoslynAnalyzer.TrimAnalysis
}
}
+ public override void HandleReturnConditionValue (FeatureChecksValue returnConditionValue, IOperation operation)
+ {
+ // Return statements should only happen inside of method bodies.
+ Debug.Assert (OwningSymbol is IMethodSymbol);
+ if (OwningSymbol is not IMethodSymbol method)
+ return;
+
+ // FeatureCheck validation needs to happen only for properties.
+ if (method.MethodKind != MethodKind.PropertyGet)
+ return;
+
+ IPropertySymbol propertySymbol = (IPropertySymbol) method.AssociatedSymbol!;
+ var featureCheckAnnotations = propertySymbol.GetFeatureCheckAnnotations ();
+
+ // If there are no feature checks, there is nothing to validate.
+ if (featureCheckAnnotations.IsEmpty())
+ return;
+
+ TrimAnalysisPatterns.Add (
+ new FeatureCheckReturnValuePattern (returnConditionValue, featureCheckAnnotations, operation, propertySymbol));
+ }
+
public override MultiValue HandleDelegateCreation (IMethodSymbol method, IOperation operation, in FeatureContext featureContext)
{
TrimAnalysisPatterns.Add (new TrimAnalysisReflectionAccessPattern (
diff --git a/src/tools/illink/src/ILLink.Shared/DiagnosticId.cs b/src/tools/illink/src/ILLink.Shared/DiagnosticId.cs
index 1c33bb084a04..1a3a27abbbdc 100644
--- a/src/tools/illink/src/ILLink.Shared/DiagnosticId.cs
+++ b/src/tools/illink/src/ILLink.Shared/DiagnosticId.cs
@@ -202,6 +202,10 @@ namespace ILLink.Shared
GenericRecursionCycle = 3054,
CorrectnessOfAbstractDelegatesCannotBeGuaranteed = 3055,
RequiresDynamicCodeOnStaticConstructor = 3056,
+
+ // Feature guard diagnostic ids.
+ ReturnValueDoesNotMatchFeatureChecks = 4000,
+ InvalidFeatureCheck = 4001
}
public static class DiagnosticIdExtensions
diff --git a/src/tools/illink/src/ILLink.Shared/SharedStrings.resx b/src/tools/illink/src/ILLink.Shared/SharedStrings.resx
index 111c1c5877de..ae50b7dcd9ff 100644
--- a/src/tools/illink/src/ILLink.Shared/SharedStrings.resx
+++ b/src/tools/illink/src/ILLink.Shared/SharedStrings.resx
@@ -1197,4 +1197,16 @@
<data name="RedundantSuppressionTitle" xml:space="preserve">
<value>Unused 'UnconditionalSuppressMessageAttribute' found. Consider removing the unused warning suppression.</value>
</data>
-</root>
\ No newline at end of file
+ <data name="ReturnValueDoesNotMatchFeatureChecksMessage" xml:space="preserve">
+ <value>Return value of '{0}' does not match FeatureCheckAttribute '{1}'.</value>
+ </data>
+ <data name="ReturnValueDoesNotMatchFeatureChecksTitle" xml:space="preserve">
+ <value>Return value does not match FeatureCheck annotations of the property. The check should return false whenever any of the features referenced in the FeatureCheck annotations is disabled.</value>
+ </data>
+ <data name="InvalidFeatureCheckMessage" xml:space="preserve">
+ <value>Invalid FeatureCheckAttribute. The attribute must be placed on a static boolean property with only a 'get' accessor.</value>
+ </data>
+ <data name="InvalidFeatureCheckTitle" xml:space="preserve">
+ <value>Invalid FeatureCheckAttribute.</value>
+ </data>
+</root>
diff --git a/src/tools/illink/src/linker/CompatibilitySuppressions.xml b/src/tools/illink/src/linker/CompatibilitySuppressions.xml
index 4a0a6296c4e2..7bf0a1e0ce69 100644
--- a/src/tools/illink/src/linker/CompatibilitySuppressions.xml
+++ b/src/tools/illink/src/linker/CompatibilitySuppressions.xml
@@ -255,6 +255,10 @@
</Suppression>
<Suppression>
<DiagnosticId>CP0001</DiagnosticId>
+ <Target>T:Mono.Linker.InterfaceImplementor</Target>
+ </Suppression>
+ <Suppression>
+ <DiagnosticId>CP0001</DiagnosticId>
<Target>T:Mono.Linker.InternalErrorException</Target>
</Suppression>
<Suppression>
@@ -1483,10 +1487,6 @@
</Suppression>
<Suppression>
<DiagnosticId>CP0002</DiagnosticId>
- <Target>M:Mono.Linker.OverrideInformation.get_IsStaticInterfaceMethodPair</Target>
- </Suppression>
- <Suppression>
- <DiagnosticId>CP0002</DiagnosticId>
<Target>M:Mono.Linker.Steps.BaseStep.get_MarkingHelpers</Target>
</Suppression>
<Suppression>
diff --git a/src/tools/illink/src/linker/Linker.Steps/MarkStep.cs b/src/tools/illink/src/linker/Linker.Steps/MarkStep.cs
index c7eb071913f5..32fd98cbe039 100644
--- a/src/tools/illink/src/linker/Linker.Steps/MarkStep.cs
+++ b/src/tools/illink/src/linker/Linker.Steps/MarkStep.cs
@@ -701,17 +701,16 @@ namespace Mono.Linker.Steps
var defaultImplementations = Annotations.GetDefaultInterfaceImplementations (method);
if (defaultImplementations is not null) {
foreach (var dimInfo in defaultImplementations) {
- ProcessDefaultImplementation (dimInfo.ImplementingType, dimInfo.InterfaceImpl, dimInfo.DefaultInterfaceMethod);
+ ProcessDefaultImplementation (dimInfo);
- var ov = new OverrideInformation (method, dimInfo.DefaultInterfaceMethod, Context);
- if (IsInterfaceImplementationMethodNeededByTypeDueToInterface (ov, dimInfo.ImplementingType))
- MarkMethod (ov.Override, new DependencyInfo (DependencyKind.Override, ov.Base), ScopeStack.CurrentScope.Origin);
+ if (IsInterfaceImplementationMethodNeededByTypeDueToInterface (dimInfo))
+ MarkMethod (dimInfo.Override, new DependencyInfo (DependencyKind.Override, dimInfo.Base), ScopeStack.CurrentScope.Origin);
}
}
var overridingMethods = Annotations.GetOverrides (method);
if (overridingMethods is not null) {
- foreach (var ov in overridingMethods) {
- if (IsInterfaceImplementationMethodNeededByTypeDueToInterface (ov, ov.Override.DeclaringType))
+ foreach (OverrideInformation ov in overridingMethods) {
+ if (IsInterfaceImplementationMethodNeededByTypeDueToInterface (ov))
MarkMethod (ov.Override, new DependencyInfo (DependencyKind.Override, ov.Base), ScopeStack.CurrentScope.Origin);
}
}
@@ -819,13 +818,14 @@ namespace Mono.Linker.Steps
return false;
}
- void ProcessDefaultImplementation (TypeDefinition typeWithDefaultImplementedInterfaceMethod, InterfaceImplementation implementation, MethodDefinition implementationMethod)
+ void ProcessDefaultImplementation (OverrideInformation ov)
{
- if ((!implementationMethod.IsStatic && !Annotations.IsInstantiated (typeWithDefaultImplementedInterfaceMethod))
- || implementationMethod.IsStatic && !Annotations.IsRelevantToVariantCasting (typeWithDefaultImplementedInterfaceMethod))
+ Debug.Assert (ov.IsOverrideOfInterfaceMember);
+ if ((!ov.Override.IsStatic && !Annotations.IsInstantiated (ov.InterfaceImplementor.Implementor))
+ || ov.Override.IsStatic && !Annotations.IsRelevantToVariantCasting (ov.InterfaceImplementor.Implementor))
return;
- MarkInterfaceImplementation (implementation);
+ MarkInterfaceImplementation (ov.InterfaceImplementor.InterfaceImplementation);
}
void MarkMarshalSpec (IMarshalInfoProvider spec, in DependencyInfo reason)
@@ -2549,11 +2549,11 @@ namespace Mono.Linker.Steps
/// <summary>
/// Returns true if the override method is required due to the interface that the base method is declared on. See doc at <see href="docs/methods-kept-by-interface.md"/> for explanation of logic.
/// </summary>
- bool IsInterfaceImplementationMethodNeededByTypeDueToInterface (OverrideInformation overrideInformation, TypeDefinition typeThatImplsInterface)
+ bool IsInterfaceImplementationMethodNeededByTypeDueToInterface (OverrideInformation overrideInformation)
{
var @base = overrideInformation.Base;
var method = overrideInformation.Override;
- Debug.Assert (@base.DeclaringType.IsInterface);
+ Debug.Assert (overrideInformation.IsOverrideOfInterfaceMember);
if (@base is null || method is null || @base.DeclaringType is null)
return false;
@@ -2562,7 +2562,7 @@ namespace Mono.Linker.Steps
// If the interface implementation is not marked, do not mark the implementation method
// A type that doesn't implement the interface isn't required to have methods that implement the interface.
- InterfaceImplementation? iface = overrideInformation.MatchingInterfaceImplementation;
+ InterfaceImplementation? iface = overrideInformation.InterfaceImplementor.InterfaceImplementation;
if (!((iface is not null && Annotations.IsMarked (iface))
|| IsInterfaceImplementationMarkedRecursively (method.DeclaringType, @base.DeclaringType)))
return false;
@@ -2580,12 +2580,12 @@ namespace Mono.Linker.Steps
// If the method is static and the implementing type is relevant to variant casting, mark the implementation method.
// A static method may only be called through a constrained call if the type is relevant to variant casting.
if (@base.IsStatic)
- return Annotations.IsRelevantToVariantCasting (typeThatImplsInterface)
+ return Annotations.IsRelevantToVariantCasting (overrideInformation.InterfaceImplementor.Implementor)
|| IgnoreScope (@base.DeclaringType.Scope);
// If the implementing type is marked as instantiated, mark the implementation method.
// If the type is not instantiated, do not mark the implementation method
- return Annotations.IsInstantiated (typeThatImplsInterface);
+ return Annotations.IsInstantiated (overrideInformation.InterfaceImplementor.Implementor);
}
static bool IsSpecialSerializationConstructor (MethodDefinition method)
@@ -3256,7 +3256,7 @@ namespace Mono.Linker.Steps
// Only if the interface method is referenced must the methods which implement it be kept, not the other way around.
if (!markAllOverrides &&
Context.Resolve (@base) is MethodDefinition baseDefinition
- && new OverrideInformation.OverridePair (baseDefinition, method).IsStaticInterfaceMethodPair ())
+ && baseDefinition.DeclaringType.IsInterface && baseDefinition.IsStatic && method.IsStatic)
continue;
MarkMethod (@base, new DependencyInfo (DependencyKind.MethodImplOverride, method), ScopeStack.CurrentScope.Origin);
MarkExplicitInterfaceImplementation (method, @base);
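For orientation, a sketch of the shape of the data MarkStep now consumes from the annotations layer (the interface and type names here are hypothetical; `OverrideInformation` and `InterfaceImplementor` are defined in the files below):

    // Given an interface method IFoo.M, a default implementation IBar.M on interface IBar : IFoo,
    // and a class C : IBar, Annotations.GetDefaultInterfaceImplementations (iFooM) now yields an
    // OverrideInformation dimInfo where:
    //   dimInfo.Base                             == IFoo.M   (the interface method)
    //   dimInfo.Override                         == IBar.M   (the default implementation)
    //   dimInfo.InterfaceImplementor.Implementor == C        (the type implementing IFoo)
    // which is why ProcessDefaultImplementation and
    // IsInterfaceImplementationMethodNeededByTypeDueToInterface can each take the single
    // OverrideInformation instead of separate type/interface-impl/method arguments.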
diff --git a/src/tools/illink/src/linker/Linker/Annotations.cs b/src/tools/illink/src/linker/Linker/Annotations.cs
index 8f7747cba354..a7b3198265e8 100644
--- a/src/tools/illink/src/linker/Linker/Annotations.cs
+++ b/src/tools/illink/src/linker/Linker/Annotations.cs
@@ -462,7 +462,7 @@ namespace Mono.Linker
/// DefaultInterfaceMethod is the method that implements <paramref name="method"/>.
/// </summary>
/// <param name="method">The interface method to find default implementations for</param>
- public IEnumerable<(TypeDefinition ImplementingType, InterfaceImplementation InterfaceImpl, MethodDefinition DefaultInterfaceMethod)>? GetDefaultInterfaceImplementations (MethodDefinition method)
+ public IEnumerable<OverrideInformation>? GetDefaultInterfaceImplementations (MethodDefinition method)
{
return TypeMapInfo.GetDefaultInterfaceImplementations (method);
}
diff --git a/src/tools/illink/src/linker/Linker/InterfaceImplementor.cs b/src/tools/illink/src/linker/Linker/InterfaceImplementor.cs
new file mode 100644
index 000000000000..e981ce872703
--- /dev/null
+++ b/src/tools/illink/src/linker/Linker/InterfaceImplementor.cs
@@ -0,0 +1,59 @@
+// Copyright (c) .NET Foundation and contributors. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.Diagnostics;
+using Mono.Cecil;
+
+namespace Mono.Linker
+{
+ public class InterfaceImplementor
+ {
+ /// <summary>
+ /// The type that implements <see cref="InterfaceImplementor.InterfaceType"/>.
+ /// </summary>
+ public TypeDefinition Implementor { get; }
+ /// <summary>
/// The .interfaceimpl on <see cref="InterfaceImplementor.Implementor"/> that points to <see cref="InterfaceImplementor.InterfaceType"/>
+ /// </summary>
+ public InterfaceImplementation InterfaceImplementation { get; }
+ /// <summary>
+ /// The type of the interface that is implemented by <see cref="InterfaceImplementor.Implementor"/>
+ /// </summary>
+ public TypeDefinition InterfaceType { get; }
+
+ public InterfaceImplementor (TypeDefinition implementor, InterfaceImplementation interfaceImplementation, TypeDefinition interfaceType, IMetadataResolver resolver)
+ {
+ Implementor = implementor;
+ InterfaceImplementation = interfaceImplementation;
+ InterfaceType = interfaceType;
+ Debug.Assert(resolver.Resolve (interfaceImplementation.InterfaceType) == interfaceType);
+ }
+
+ public static InterfaceImplementor Create(TypeDefinition implementor, TypeDefinition interfaceType, IMetadataResolver resolver)
+ {
+ foreach(InterfaceImplementation iface in implementor.Interfaces) {
+ if (resolver.Resolve(iface.InterfaceType) == interfaceType) {
+ return new InterfaceImplementor(implementor, iface, interfaceType, resolver);
+ }
+ }
+
+ Queue<TypeDefinition> ifacesToCheck = new ();
+ ifacesToCheck.Enqueue(implementor);
+ while (ifacesToCheck.Count > 0) {
+ var currentIface = ifacesToCheck.Dequeue ();
+
+ foreach(InterfaceImplementation ifaceImpl in currentIface.Interfaces) {
+ var iface = resolver.Resolve (ifaceImpl.InterfaceType);
+ if (iface == interfaceType) {
+ return new InterfaceImplementor(implementor, ifaceImpl, interfaceType, resolver);
+ }
+ // Resolve can return null for unresolvable interface references; guard against enqueueing null.
+ if (iface != null)
+ ifacesToCheck.Enqueue (iface);
+ }
+ }
+ throw new InvalidOperationException ($"Type '{implementor.FullName}' does not implement interface '{interfaceType.FullName}' directly or through any interfaces");
+ }
+ }
+}
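A minimal usage sketch of `InterfaceImplementor.Create` for the hierarchy exercised by the new `InterfaceImplementedThroughBaseInterface` test further down (the local variables are hypothetical; `context` is the linker's `IMetadataResolver`):

    // interface IBase { void M (); }
    // interface IDerived : IBase { }
    // class C : IDerived { }    // no direct .interfaceimpl for IBase on C
    InterfaceImplementor ii = InterfaceImplementor.Create (cType, iBaseType, context);
    // The first loop finds no direct implementation on C, so the breadth-first walk
    // enqueues IDerived and returns IDerived's .interfaceimpl of IBase.
    Debug.Assert (ii.Implementor == cType);
    Debug.Assert (ii.InterfaceType == iBaseType);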
diff --git a/src/tools/illink/src/linker/Linker/OverrideInformation.cs b/src/tools/illink/src/linker/Linker/OverrideInformation.cs
index 077353eb2ee7..0727d5d25c19 100644
--- a/src/tools/illink/src/linker/Linker/OverrideInformation.cs
+++ b/src/tools/illink/src/linker/Linker/OverrideInformation.cs
@@ -3,71 +3,39 @@
using System.Diagnostics;
using Mono.Cecil;
+using System.Diagnostics.CodeAnalysis;
namespace Mono.Linker
{
[DebuggerDisplay ("{Override}")]
public class OverrideInformation
{
- readonly ITryResolveMetadata resolver;
- readonly OverridePair _pair;
- private InterfaceImplementation? _matchingInterfaceImplementation;
+ public MethodDefinition Base { get; }
- public OverrideInformation (MethodDefinition @base, MethodDefinition @override, ITryResolveMetadata resolver, InterfaceImplementation? matchingInterfaceImplementation = null)
- {
- _pair = new OverridePair (@base, @override);
- _matchingInterfaceImplementation = matchingInterfaceImplementation;
- this.resolver = resolver;
- }
- public readonly record struct OverridePair (MethodDefinition Base, MethodDefinition Override)
- {
- public bool IsStaticInterfaceMethodPair () => Base.DeclaringType.IsInterface && Base.IsStatic && Override.IsStatic;
- public InterfaceImplementation? GetMatchingInterfaceImplementation (ITryResolveMetadata resolver)
- {
- if (!Base.DeclaringType.IsInterface)
- return null;
- var interfaceType = Base.DeclaringType;
- foreach (var @interface in Override.DeclaringType.Interfaces) {
- if (resolver.TryResolve (@interface.InterfaceType)?.Equals (interfaceType) == true) {
- return @interface;
- }
- }
- return null;
- }
- }
+ public MethodDefinition Override { get; }
- public MethodDefinition Base { get => _pair.Base; }
- public MethodDefinition Override { get => _pair.Override; }
- public InterfaceImplementation? MatchingInterfaceImplementation {
- get {
- if (_matchingInterfaceImplementation is not null)
- return _matchingInterfaceImplementation;
- _matchingInterfaceImplementation = _pair.GetMatchingInterfaceImplementation (resolver);
- return _matchingInterfaceImplementation;
- }
- }
+ internal InterfaceImplementor? InterfaceImplementor { get; }
- public bool IsOverrideOfInterfaceMember {
- get {
- if (MatchingInterfaceImplementation != null)
- return true;
-
- return Base.DeclaringType.IsInterface;
- }
+ internal OverrideInformation (MethodDefinition @base, MethodDefinition @override, InterfaceImplementor? interfaceImplementor = null)
+ {
+ Base = @base;
+ Override = @override;
+ InterfaceImplementor = interfaceImplementor;
+ // Ensure we have an interface implementation if the base method is from an interface and the override method is on a class
+ Debug.Assert(@base.DeclaringType.IsInterface && interfaceImplementor != null
+ || !@base.DeclaringType.IsInterface && interfaceImplementor == null);
+ // Ensure the interfaceImplementor is for the interface we expect
+ Debug.Assert (@base.DeclaringType.IsInterface ? interfaceImplementor!.InterfaceType == @base.DeclaringType : true);
}
- public TypeDefinition? InterfaceType {
- get {
- if (!IsOverrideOfInterfaceMember)
- return null;
+ public InterfaceImplementation? MatchingInterfaceImplementation
+ => InterfaceImplementor?.InterfaceImplementation;
- if (MatchingInterfaceImplementation != null)
- return resolver.TryResolve (MatchingInterfaceImplementation.InterfaceType);
-
- return Base.DeclaringType;
- }
- }
+ public TypeDefinition? InterfaceType
+ => InterfaceImplementor?.InterfaceType;
- public bool IsStaticInterfaceMethodPair => _pair.IsStaticInterfaceMethodPair ();
+ [MemberNotNullWhen (true, nameof (InterfaceImplementor), nameof (MatchingInterfaceImplementation))]
+ public bool IsOverrideOfInterfaceMember
+ => InterfaceImplementor != null;
}
}
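With `[MemberNotNullWhen]` on `IsOverrideOfInterfaceMember`, callers get compiler-checked null-safety instead of the old lazily-computed `MatchingInterfaceImplementation`; a hedged sketch of the intended pattern:

    static void Example (OverrideInformation ov)
    {
        if (ov.IsOverrideOfInterfaceMember) {
            // The attribute lets the compiler treat both members as non-null on this
            // branch, matching the Debug.Assert invariants in the constructor above.
            TypeDefinition implementor = ov.InterfaceImplementor.Implementor;
            InterfaceImplementation impl = ov.MatchingInterfaceImplementation;
        }
    }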
diff --git a/src/tools/illink/src/linker/Linker/TypeMapInfo.cs b/src/tools/illink/src/linker/Linker/TypeMapInfo.cs
index 804b2ad93ec2..bb2836a804d7 100644
--- a/src/tools/illink/src/linker/Linker/TypeMapInfo.cs
+++ b/src/tools/illink/src/linker/Linker/TypeMapInfo.cs
@@ -29,9 +29,11 @@
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
+using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
+using System.Linq;
using Mono.Cecil;
namespace Mono.Linker
@@ -43,7 +45,7 @@ namespace Mono.Linker
readonly LinkContext context;
protected readonly Dictionary<MethodDefinition, List<OverrideInformation>> base_methods = new Dictionary<MethodDefinition, List<OverrideInformation>> ();
protected readonly Dictionary<MethodDefinition, List<OverrideInformation>> override_methods = new Dictionary<MethodDefinition, List<OverrideInformation>> ();
- protected readonly Dictionary<MethodDefinition, List<(TypeDefinition InstanceType, InterfaceImplementation ImplementationProvider, MethodDefinition DefaultImplementationMethod)>> default_interface_implementations = new Dictionary<MethodDefinition, List<(TypeDefinition, InterfaceImplementation, MethodDefinition)>> ();
+ protected readonly Dictionary<MethodDefinition, List<OverrideInformation>> default_interface_implementations = new Dictionary<MethodDefinition, List<OverrideInformation>> ();
public TypeMapInfo (LinkContext context)
{
@@ -92,41 +94,41 @@ namespace Mono.Linker
/// DefaultInterfaceMethod is the method that implements <paramref name="baseMethod"/>.
/// </summary>
/// <param name="baseMethod">The interface method to find default implementations for</param>
- public IEnumerable<(TypeDefinition ImplementingType, InterfaceImplementation InterfaceImpl, MethodDefinition DefaultImplementationMethod)>? GetDefaultInterfaceImplementations (MethodDefinition baseMethod)
+ public IEnumerable<OverrideInformation>? GetDefaultInterfaceImplementations (MethodDefinition baseMethod)
{
default_interface_implementations.TryGetValue (baseMethod, out var ret);
return ret;
}
- public void AddBaseMethod (MethodDefinition method, MethodDefinition @base, InterfaceImplementation? matchingInterfaceImplementation)
+ public void AddBaseMethod (MethodDefinition method, MethodDefinition @base, InterfaceImplementor? interfaceImplementor)
{
if (!base_methods.TryGetValue (method, out List<OverrideInformation>? methods)) {
methods = new List<OverrideInformation> ();
base_methods[method] = methods;
}
- methods.Add (new OverrideInformation (@base, method, context, matchingInterfaceImplementation));
+ methods.Add (new OverrideInformation (@base, method, interfaceImplementor));
}
- public void AddOverride (MethodDefinition @base, MethodDefinition @override, InterfaceImplementation? matchingInterfaceImplementation = null)
+ public void AddOverride (MethodDefinition @base, MethodDefinition @override, InterfaceImplementor? interfaceImplementor = null)
{
if (!override_methods.TryGetValue (@base, out List<OverrideInformation>? methods)) {
methods = new List<OverrideInformation> ();
override_methods.Add (@base, methods);
}
- methods.Add (new OverrideInformation (@base, @override, context, matchingInterfaceImplementation));
+ methods.Add (new OverrideInformation (@base, @override, interfaceImplementor));
}
- public void AddDefaultInterfaceImplementation (MethodDefinition @base, TypeDefinition implementingType, (InterfaceImplementation, MethodDefinition) matchingInterfaceImplementation)
+ public void AddDefaultInterfaceImplementation (MethodDefinition @base, InterfaceImplementor interfaceImplementor, MethodDefinition defaultImplementationMethod)
{
Debug.Assert(@base.DeclaringType.IsInterface);
if (!default_interface_implementations.TryGetValue (@base, out var implementations)) {
- implementations = new List<(TypeDefinition, InterfaceImplementation, MethodDefinition)> ();
+ implementations = new List<OverrideInformation> ();
default_interface_implementations.Add (@base, implementations);
}
- implementations.Add ((implementingType, matchingInterfaceImplementation.Item1, matchingInterfaceImplementation.Item2));
+ implementations.Add (new (@base, defaultImplementationMethod, interfaceImplementor));
}
protected virtual void MapType (TypeDefinition type)
@@ -168,20 +170,20 @@ namespace Mono.Linker
// Try to find an implementation with a name/sig match on the current type
MethodDefinition? exactMatchOnType = TryMatchMethod (type, interfaceMethod);
if (exactMatchOnType != null) {
- AnnotateMethods (resolvedInterfaceMethod, exactMatchOnType);
+ AnnotateMethods (resolvedInterfaceMethod, exactMatchOnType, new (type, interfaceImpl.OriginalImpl, resolvedInterfaceMethod.DeclaringType, context));
continue;
}
// Next try to find an implementation with a name/sig match in the base hierarchy
var @base = GetBaseMethodInTypeHierarchy (type, interfaceMethod);
if (@base != null) {
- AnnotateMethods (resolvedInterfaceMethod, @base, interfaceImpl.OriginalImpl);
+ AnnotateMethods (resolvedInterfaceMethod, @base, new (type, interfaceImpl.OriginalImpl, resolvedInterfaceMethod.DeclaringType, context));
continue;
}
}
// Look for a default implementation last.
- FindAndAddDefaultInterfaceImplementations (type, type, resolvedInterfaceMethod);
+ FindAndAddDefaultInterfaceImplementations (type, type, resolvedInterfaceMethod, interfaceImpl.OriginalImpl);
}
}
}
@@ -211,24 +213,29 @@ namespace Mono.Linker
if (@base == null)
return;
+ Debug.Assert(!@base.DeclaringType.IsInterface);
+
AnnotateMethods (@base, method);
}
void MapOverrides (MethodDefinition method)
{
- foreach (MethodReference override_ref in method.Overrides) {
- MethodDefinition? @override = context.TryResolve (override_ref);
- if (@override == null)
+ foreach (MethodReference baseMethodRef in method.Overrides) {
+ MethodDefinition? baseMethod = context.TryResolve (baseMethodRef);
+ if (baseMethod == null)
continue;
-
- AnnotateMethods (@override, method);
+ if (baseMethod.DeclaringType.IsInterface) {
+ AnnotateMethods (baseMethod, method, InterfaceImplementor.Create (method.DeclaringType, baseMethod.DeclaringType, context));
+ } else {
+ AnnotateMethods (baseMethod, method);
+ }
}
}
- void AnnotateMethods (MethodDefinition @base, MethodDefinition @override, InterfaceImplementation? matchingInterfaceImplementation = null)
+ void AnnotateMethods (MethodDefinition @base, MethodDefinition @override, InterfaceImplementor? interfaceImplementor = null)
{
- AddBaseMethod (@override, @base, matchingInterfaceImplementation);
- AddOverride (@base, @override, matchingInterfaceImplementation);
+ AddBaseMethod (@override, @base, interfaceImplementor);
+ AddOverride (@base, @override, interfaceImplementor);
}
MethodDefinition? GetBaseMethodInTypeHierarchy (MethodDefinition method)
@@ -290,7 +297,7 @@ namespace Mono.Linker
/// <param name="originalInterfaceImpl">
/// The InterfaceImplementation on <paramref name="typeThatImplementsInterface"/> that points to the DeclaringType of <paramref name="interfaceMethodToBeImplemented"/>.
/// </param>
- void FindAndAddDefaultInterfaceImplementations (TypeDefinition typeThatImplementsInterface, TypeDefinition typeThatMayHaveDIM, MethodDefinition interfaceMethodToBeImplemented)
+ void FindAndAddDefaultInterfaceImplementations (TypeDefinition typeThatImplementsInterface, TypeDefinition typeThatMayHaveDIM, MethodDefinition interfaceMethodToBeImplemented, InterfaceImplementation originalInterfaceImpl)
{
// Go over all interfaces, trying to find a method that is an explicit MethodImpl of the
// interface method in question.
@@ -305,7 +312,7 @@ namespace Mono.Linker
foreach (var potentialImplMethod in potentialImplInterface.Methods) {
if (potentialImplMethod == interfaceMethodToBeImplemented &&
!potentialImplMethod.IsAbstract) {
- AddDefaultInterfaceImplementation (interfaceMethodToBeImplemented, typeThatImplementsInterface, (interfaceImpl, potentialImplMethod));
+ AddDefaultInterfaceImplementation (interfaceMethodToBeImplemented, new (typeThatImplementsInterface, originalInterfaceImpl, interfaceMethodToBeImplemented.DeclaringType, context), potentialImplMethod);
foundImpl = true;
break;
}
@@ -314,9 +321,9 @@ namespace Mono.Linker
continue;
// This method is an override of something. Let's see if it's the method we are looking for.
- foreach (var @override in potentialImplMethod.Overrides) {
- if (context.TryResolve (@override) == interfaceMethodToBeImplemented) {
- AddDefaultInterfaceImplementation (interfaceMethodToBeImplemented, typeThatImplementsInterface, (interfaceImpl, potentialImplMethod));
+ foreach (var baseMethod in potentialImplMethod.Overrides) {
+ if (context.TryResolve (baseMethod) == interfaceMethodToBeImplemented) {
+ AddDefaultInterfaceImplementation (interfaceMethodToBeImplemented, new (typeThatImplementsInterface, originalInterfaceImpl, interfaceMethodToBeImplemented.DeclaringType, context), potentialImplMethod);
foundImpl = true;
break;
}
@@ -330,7 +337,7 @@ namespace Mono.Linker
// We haven't found a MethodImpl on the current interface, but one of the interfaces
// this interface requires could still provide it.
if (!foundImpl) {
- FindAndAddDefaultInterfaceImplementations (typeThatImplementsInterface, potentialImplInterface, interfaceMethodToBeImplemented);
+ FindAndAddDefaultInterfaceImplementations (typeThatImplementsInterface, potentialImplInterface, interfaceMethodToBeImplemented, originalInterfaceImpl);
}
}
}
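A consequence of the `MapOverrides` change worth calling out: a `.override` record targeting an interface method now eagerly builds an `InterfaceImplementor` via `Create`, while class-to-class overrides carry none, so the invariant asserted by `OverrideInformation`'s constructor holds for every entry in the maps:

    // For any OverrideInformation ov produced by TypeMapInfo:
    //   ov.Base.DeclaringType.IsInterface == (ov.InterfaceImplementor != null)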
diff --git a/src/tools/illink/test/ILLink.RoslynAnalyzer.Tests/DataFlowTests.cs b/src/tools/illink/test/ILLink.RoslynAnalyzer.Tests/DataFlowTests.cs
index 2b1cf973d8a2..090a11b86167 100644
--- a/src/tools/illink/test/ILLink.RoslynAnalyzer.Tests/DataFlowTests.cs
+++ b/src/tools/illink/test/ILLink.RoslynAnalyzer.Tests/DataFlowTests.cs
@@ -150,6 +150,12 @@ namespace ILLink.RoslynAnalyzer.Tests
}
[Fact]
+ public Task FeatureCheckAttributeDataFlow ()
+ {
+ return RunTest ();
+ }
+
+ [Fact]
public Task FieldDataFlow ()
{
return RunTest (nameof (FieldDataFlow));
diff --git a/src/tools/illink/test/ILLink.RoslynAnalyzer.Tests/generated/ILLink.RoslynAnalyzer.Tests.Generator/ILLink.RoslynAnalyzer.Tests.TestCaseGenerator/Inheritance.InterfacesTests.g.cs b/src/tools/illink/test/ILLink.RoslynAnalyzer.Tests/generated/ILLink.RoslynAnalyzer.Tests.Generator/ILLink.RoslynAnalyzer.Tests.TestCaseGenerator/Inheritance.InterfacesTests.g.cs
index 2e1a2bbcb345..649b8449527f 100644
--- a/src/tools/illink/test/ILLink.RoslynAnalyzer.Tests/generated/ILLink.RoslynAnalyzer.Tests.Generator/ILLink.RoslynAnalyzer.Tests.TestCaseGenerator/Inheritance.InterfacesTests.g.cs
+++ b/src/tools/illink/test/ILLink.RoslynAnalyzer.Tests/generated/ILLink.RoslynAnalyzer.Tests.Generator/ILLink.RoslynAnalyzer.Tests.TestCaseGenerator/Inheritance.InterfacesTests.g.cs
@@ -16,6 +16,12 @@ namespace ILLink.RoslynAnalyzer.Tests.Inheritance
}
[Fact]
+ public Task InterfaceImplementedThroughBaseInterface ()
+ {
+ return RunTest (allowMissingWarnings: true);
+ }
+
+ [Fact]
public Task InterfaceOnUninstantiatedTypeRemoved ()
{
return RunTest (allowMissingWarnings: true);
diff --git a/src/tools/illink/test/Mono.Linker.Tests.Cases.Expectations/Support/FeatureCheckAttribute.cs b/src/tools/illink/test/Mono.Linker.Tests.Cases.Expectations/Support/FeatureCheckAttribute.cs
new file mode 100644
index 000000000000..2d284c2b3ea3
--- /dev/null
+++ b/src/tools/illink/test/Mono.Linker.Tests.Cases.Expectations/Support/FeatureCheckAttribute.cs
@@ -0,0 +1,17 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+namespace System.Diagnostics.CodeAnalysis
+{
+ // Allow AttributeTargets.Method for testing invalid usages of a custom FeatureCheckAttribute
+ [AttributeUsage (AttributeTargets.Property | AttributeTargets.Method, Inherited=false)]
+ public sealed class FeatureCheckAttribute : Attribute
+ {
+ public Type FeatureType { get; }
+
+ public FeatureCheckAttribute (Type featureType)
+ {
+ FeatureType = featureType;
+ }
+ }
+}
diff --git a/src/tools/illink/test/Mono.Linker.Tests.Cases.Expectations/Support/FeatureDependsOnAttribute.cs b/src/tools/illink/test/Mono.Linker.Tests.Cases.Expectations/Support/FeatureDependsOnAttribute.cs
new file mode 100644
index 000000000000..da9a19bbfebc
--- /dev/null
+++ b/src/tools/illink/test/Mono.Linker.Tests.Cases.Expectations/Support/FeatureDependsOnAttribute.cs
@@ -0,0 +1,16 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+namespace System.Diagnostics.CodeAnalysis
+{
+ [AttributeUsage(AttributeTargets.Class, Inherited=false, AllowMultiple=true)]
+ public sealed class FeatureDependsOnAttribute : Attribute
+ {
+ public Type FeatureType { get; }
+
+ public FeatureDependsOnAttribute(Type featureType)
+ {
+ FeatureType = featureType;
+ }
+ }
+}
diff --git a/src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/Dependencies/TestFeatures.cs b/src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/Dependencies/TestFeatures.cs
new file mode 100644
index 000000000000..942c9f3586dd
--- /dev/null
+++ b/src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/Dependencies/TestFeatures.cs
@@ -0,0 +1,12 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+namespace ILLink.RoslynAnalyzer
+{
+ public class TestFeatures
+ {
+ public static bool IsUnreferencedCodeSupported => true;
+
+ public static bool IsAssemblyFilesSupported => true;
+ }
+}
diff --git a/src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/FeatureCheckAttributeDataFlow.cs b/src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/FeatureCheckAttributeDataFlow.cs
new file mode 100644
index 000000000000..d8aa258c3775
--- /dev/null
+++ b/src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/FeatureCheckAttributeDataFlow.cs
@@ -0,0 +1,615 @@
+// Copyright (c) .NET Foundation and contributors. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
+using System;
+using System.Runtime.CompilerServices;
+using System.Diagnostics;
+using System.Diagnostics.CodeAnalysis;
+using ILLink.RoslynAnalyzer;
+using Mono.Linker.Tests.Cases.Expectations.Assertions;
+using Mono.Linker.Tests.Cases.Expectations.Helpers;
+using Mono.Linker.Tests.Cases.Expectations.Metadata;
+
+namespace Mono.Linker.Tests.Cases.DataFlow
+{
+ [SkipKeptItemsValidation]
+ [ExpectedNoWarnings]
+ // Note: the XML must be passed as an embedded resource named ILLink.Substitutions.xml,
+ // not as a separate substitution file, for it to work with NativeAot.
+ // Related: https://github.com/dotnet/runtime/issues/88647
+ [SetupCompileBefore ("TestFeatures.dll", new[] { "Dependencies/TestFeatures.cs" },
+ resources: new object[] { new [] { "FeatureCheckDataFlowTestSubstitutions.xml", "ILLink.Substitutions.xml" } })]
+ // FeatureCheckAttribute is currently only supported by the analyzer.
+ // The same guard behavior is achieved for ILLink/ILCompiler using substitutions.
+ [SetupCompileResource ("FeatureCheckAttributeDataFlowTestSubstitutions.xml", "ILLink.Substitutions.xml")]
+ [IgnoreSubstitutions (false)]
+ public class FeatureCheckAttributeDataFlow
+ {
+ public static void Main ()
+ {
+ DefineFeatureCheck.Test ();
+ ValidGuardBodies.Test ();
+ InvalidGuardBodies.Test ();
+ InvalidFeatureChecks.Test ();
+ }
+
+ class DefineFeatureCheck {
+ [FeatureCheck (typeof(RequiresDynamicCodeAttribute))]
+ static bool GuardDynamicCode => RuntimeFeature.IsDynamicCodeSupported;
+
+ static void TestGuardDynamicCode ()
+ {
+ if (GuardDynamicCode)
+ RequiresDynamicCode ();
+ }
+
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool GuardUnreferencedCode => TestFeatures.IsUnreferencedCodeSupported;
+
+ static void TestGuardUnreferencedCode ()
+ {
+ if (GuardUnreferencedCode)
+ RequiresUnreferencedCode ();
+ }
+
+ [FeatureCheck (typeof(RequiresAssemblyFilesAttribute))]
+ static bool GuardAssemblyFiles => TestFeatures.IsAssemblyFilesSupported;
+
+ static void TestGuardAssemblyFiles ()
+ {
+ if (GuardAssemblyFiles)
+ RequiresAssemblyFiles ();
+ }
+
+ [ExpectedWarning ("IL4000", nameof (RequiresDynamicCodeAttribute), ProducedBy = Tool.Analyzer)]
+ [ExpectedWarning ("IL4000", nameof (RequiresUnreferencedCodeAttribute), ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof(DynamicCodeAndUnreferencedCode))]
+ static bool GuardDynamicCodeAndUnreferencedCode => RuntimeFeature.IsDynamicCodeSupported && TestFeatures.IsUnreferencedCodeSupported;
+
+ [FeatureDependsOn (typeof (RequiresDynamicCodeAttribute))]
+ [FeatureDependsOn (typeof (RequiresUnreferencedCodeAttribute))]
+ static class DynamicCodeAndUnreferencedCode {}
+
+ static void TestMultipleGuards ()
+ {
+ if (GuardDynamicCodeAndUnreferencedCode) {
+ RequiresDynamicCode ();
+ RequiresUnreferencedCode ();
+ }
+ }
+
+ [FeatureDependsOn (typeof (RequiresDynamicCodeAttribute))]
+ static class DynamicCode1 {}
+
+ [FeatureDependsOn (typeof (DynamicCode1))]
+ static class DynamicCode2 {}
+
+ [FeatureCheck (typeof (DynamicCode2))]
+ static bool GuardDynamicCodeIndirect => RuntimeFeature.IsDynamicCodeSupported;
+
+ static void TestIndirectGuard ()
+ {
+ if (GuardDynamicCodeIndirect)
+ RequiresDynamicCode ();
+ }
+
+ [FeatureDependsOn (typeof (RequiresDynamicCodeAttribute))]
+ [FeatureDependsOn (typeof (DynamicCodeCycle))]
+ static class DynamicCodeCycle {}
+
+ [FeatureCheck (typeof (DynamicCodeCycle))]
+ static bool GuardDynamicCodeCycle => RuntimeFeature.IsDynamicCodeSupported;
+
+ [FeatureCheck (typeof (DynamicCodeCycle))]
+ static void TestFeatureDependencyCycle1 ()
+ {
+ if (GuardDynamicCodeCycle)
+ RequiresDynamicCode ();
+ }
+
+ [FeatureDependsOn (typeof (DynamicCodeCycle2_B))]
+ static class DynamicCodeCycle2_A {}
+
+ [FeatureDependsOn (typeof (RequiresDynamicCodeAttribute))]
+ [FeatureDependsOn (typeof (DynamicCodeCycle2_A))]
+ static class DynamicCodeCycle2_B {}
+
+ [FeatureDependsOn (typeof (DynamicCodeCycle2_A))]
+ static class DynamicCodeCycle2 {}
+
+ [FeatureCheck (typeof (DynamicCodeCycle2))]
+ static bool GuardDynamicCodeCycle2 => RuntimeFeature.IsDynamicCodeSupported;
+
+ static void TestFeatureDependencyCycle2 ()
+ {
+ if (GuardDynamicCodeCycle2)
+ RequiresDynamicCode ();
+ }
+
+ public static void Test ()
+ {
+ TestGuardDynamicCode ();
+ TestGuardUnreferencedCode ();
+ TestGuardAssemblyFiles ();
+ TestMultipleGuards ();
+ TestIndirectGuard ();
+ TestFeatureDependencyCycle1 ();
+ TestFeatureDependencyCycle2 ();
+ }
+ }
+
+ class ValidGuardBodies {
+
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool ReturnFalseGuard => false;
+
+ static void TestReturnFalseGuard ()
+ {
+ if (ReturnFalseGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool DirectGuard => TestFeatures.IsUnreferencedCodeSupported;
+
+ static void TestDirectGuard ()
+ {
+ if (DirectGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool IndirectGuard => DirectGuard;
+
+ static void TestIndirectGuard ()
+ {
+ if (IndirectGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ // The analyzer doesn't understand this pattern because it compiles into a CFG that effectively
+ // looks like this:
+ //
+ // bool tmp;
+ // if (TestFeatures.IsUnreferencedCodeSupported)
+ // tmp = OtherCondition ();
+ // else
+ // tmp = false;
+ // return tmp;
+ //
+ // The analyzer doesn't do constant propagation of the boolean, so it doesn't know that
+ // the return value is always false when TestFeatures.IsUnreferencedCodeSupported is false.
+ [ExpectedWarning ("IL4000", nameof (RequiresUnreferencedCodeAttribute), ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool AndGuard => TestFeatures.IsUnreferencedCodeSupported && OtherCondition ();
+
+ static void TestAndGuard ()
+ {
+ if (AndGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool NotNotGuard => !!TestFeatures.IsUnreferencedCodeSupported;
+
+ static void TestNotNotGuard ()
+ {
+ if (NotNotGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool EqualsTrueGuard => TestFeatures.IsUnreferencedCodeSupported == true;
+
+ static void TestEqualsTrueGuard ()
+ {
+ if (EqualsTrueGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool TrueEqualsGuard => true == TestFeatures.IsUnreferencedCodeSupported;
+
+ static void TestTrueEqualsGuard ()
+ {
+ if (TrueEqualsGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool NotEqualsFalseGuard => TestFeatures.IsUnreferencedCodeSupported != false;
+
+ static void TestNotEqualsFalseGuard ()
+ {
+ if (NotEqualsFalseGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool FalseNotEqualsGuard => false != TestFeatures.IsUnreferencedCodeSupported;
+
+ static void TestFalseNotEqualsGuard ()
+ {
+ if (FalseNotEqualsGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool IsTrueGuard => TestFeatures.IsUnreferencedCodeSupported is true;
+
+ static void TestIsTrueGuard ()
+ {
+ if (IsTrueGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool IsNotFalseGuard => TestFeatures.IsUnreferencedCodeSupported is not false;
+
+ static void TestIsNotFalseGuard ()
+ {
+ if (IsNotFalseGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [ExpectedWarning ("IL4000", nameof (RequiresUnreferencedCodeAttribute), ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool IfReturnTrueGuard {
+ get {
+ if (TestFeatures.IsUnreferencedCodeSupported)
+ return true;
+ return false;
+ }
+ }
+
+ [ExpectedWarning ("IL4000", nameof (RequiresUnreferencedCodeAttribute), ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool ElseReturnTrueGuard {
+ get {
+ if (!TestFeatures.IsUnreferencedCodeSupported)
+ return false;
+ else
+ return true;
+ }
+ }
+
+ static void TestElseReturnTrueGuard ()
+ {
+ if (ElseReturnTrueGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ static void TestIfReturnTrueGuard ()
+ {
+ if (IfReturnTrueGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [FeatureCheck (typeof (RequiresUnreferencedCodeAttribute))]
+ static bool AssertReturnFalseGuard {
+ get {
+ Debug.Assert (TestFeatures.IsUnreferencedCodeSupported);
+ return false;
+ }
+ }
+
+ static void TestAssertReturnFalseGuard ()
+ {
+ if (AssertReturnFalseGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [FeatureCheck (typeof (RequiresUnreferencedCodeAttribute))]
+ static bool AssertNotReturnFalseGuard {
+ get {
+ Debug.Assert (!TestFeatures.IsUnreferencedCodeSupported);
+ return false;
+ }
+ }
+
+ static void TestAssertNotReturnFalseGuard ()
+ {
+ if (AssertNotReturnFalseGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [ExpectedWarning ("IL4000", nameof (RequiresUnreferencedCodeAttribute), ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof (RequiresUnreferencedCodeAttribute))]
+ static bool AssertReturnTrueGuard {
+ get {
+ Debug.Assert (TestFeatures.IsUnreferencedCodeSupported);
+ return true;
+ }
+ }
+
+ static void TestAssertReturnTrueGuard ()
+ {
+ if (AssertReturnTrueGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [FeatureCheck (typeof (RequiresUnreferencedCodeAttribute))]
+ static bool ThrowGuard {
+ get {
+ if (!TestFeatures.IsUnreferencedCodeSupported)
+ throw new Exception ();
+ return false;
+ }
+ }
+
+ static void TestThrowGuard ()
+ {
+ if (ThrowGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [ExpectedWarning ("IL4000", nameof (RequiresUnreferencedCodeAttribute), ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool TernaryIfGuard => TestFeatures.IsUnreferencedCodeSupported ? true : false;
+
+ static void TestTernaryIfGuard ()
+ {
+ if (TernaryIfGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [ExpectedWarning ("IL4000", nameof (RequiresUnreferencedCodeAttribute), ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool TernaryElseGuard => !TestFeatures.IsUnreferencedCodeSupported ? false : true;
+
+ static void TestTernaryElseGuard ()
+ {
+ if (TernaryElseGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ public static void Test ()
+ {
+ TestDirectGuard ();
+ TestIndirectGuard ();
+
+ TestReturnFalseGuard ();
+ TestAndGuard ();
+ TestNotNotGuard ();
+ TestEqualsTrueGuard ();
+ TestTrueEqualsGuard ();
+ TestNotEqualsFalseGuard ();
+ TestFalseNotEqualsGuard ();
+ TestIsTrueGuard ();
+ TestIsNotFalseGuard ();
+ TestIfReturnTrueGuard ();
+ TestElseReturnTrueGuard ();
+ TestAssertReturnFalseGuard ();
+ TestAssertNotReturnFalseGuard ();
+ TestAssertReturnTrueGuard ();
+ TestThrowGuard ();
+ TestTernaryIfGuard ();
+ TestTernaryElseGuard ();
+ }
+ }
+
+ class InvalidGuardBodies {
+ [ExpectedWarning ("IL4000", nameof (RequiresUnreferencedCodeAttribute), ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool ReturnTrueGuard => true;
+
+ static void TestReturnTrueGuard ()
+ {
+ if (ReturnTrueGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [ExpectedWarning ("IL4000", nameof (RequiresUnreferencedCodeAttribute), ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool OtherConditionGuard => OtherCondition ();
+
+ static void TestOtherConditionGuard ()
+ {
+ if (OtherConditionGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [ExpectedWarning ("IL4000", nameof (RequiresUnreferencedCodeAttribute), ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool OrGuard => TestFeatures.IsUnreferencedCodeSupported || OtherCondition ();
+
+ static void TestOrGuard ()
+ {
+ if (OrGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [ExpectedWarning ("IL4000", nameof (RequiresUnreferencedCodeAttribute), ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool NotGuard => !TestFeatures.IsUnreferencedCodeSupported;
+
+ static void TestNotGuard ()
+ {
+ if (NotGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [ExpectedWarning ("IL4000", nameof (RequiresUnreferencedCodeAttribute), ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool EqualsFalseGuard => TestFeatures.IsUnreferencedCodeSupported == false;
+
+ static void TestEqualsFalseGuard ()
+ {
+ if (EqualsFalseGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [ExpectedWarning ("IL4000", nameof (RequiresUnreferencedCodeAttribute), ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool FalseEqualsGuard => false == TestFeatures.IsUnreferencedCodeSupported;
+
+ static void TestFalseEqualsGuard ()
+ {
+ if (FalseEqualsGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [ExpectedWarning ("IL4000", nameof (RequiresUnreferencedCodeAttribute), ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool NotEqualsTrueGuard => TestFeatures.IsUnreferencedCodeSupported != true;
+
+ static void TestNotEqualsTrueGuard ()
+ {
+ if (NotEqualsTrueGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [ExpectedWarning ("IL4000", nameof (RequiresUnreferencedCodeAttribute), ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool TrueNotEqualsGuard => true != TestFeatures.IsUnreferencedCodeSupported;
+
+ static void TestTrueNotEqualsGuard ()
+ {
+ if (TrueNotEqualsGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [ExpectedWarning ("IL4000", nameof (RequiresUnreferencedCodeAttribute), ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool IsNotTrueGuard => TestFeatures.IsUnreferencedCodeSupported is not true;
+
+ static void TestIsNotTrueGuard ()
+ {
+ if (IsNotTrueGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [ExpectedWarning ("IL4000", nameof (RequiresUnreferencedCodeAttribute), ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool IsFalseGuard => TestFeatures.IsUnreferencedCodeSupported is false;
+
+ static void TestIsFalseGuard ()
+ {
+ if (IsFalseGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [ExpectedWarning ("IL4000", nameof (RequiresUnreferencedCodeAttribute), ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool IfReturnFalseGuard {
+ get {
+ if (TestFeatures.IsUnreferencedCodeSupported)
+ return false;
+ return true;
+ }
+ }
+
+ static void TestIfReturnFalseGuard ()
+ {
+ if (IfReturnFalseGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [ExpectedWarning ("IL4000", nameof (RequiresUnreferencedCodeAttribute), ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool ElseReturnFalseGuard {
+ get {
+ if (!TestFeatures.IsUnreferencedCodeSupported)
+ return true;
+ else
+ return false;
+ }
+ }
+
+ static void TestElseReturnFalseGuard ()
+ {
+ if (ElseReturnFalseGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ [ExpectedWarning ("IL4000", nameof (RequiresUnreferencedCodeAttribute), ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof (RequiresUnreferencedCodeAttribute))]
+ static bool AssertNotReturnTrueGuard {
+ get {
+ Debug.Assert (!TestFeatures.IsUnreferencedCodeSupported);
+ return true;
+ }
+ }
+
+ static void TestAssertNotReturnTrueGuard ()
+ {
+ if (AssertNotReturnTrueGuard)
+ RequiresUnreferencedCode ();
+ }
+
+ public static void Test ()
+ {
+ TestOtherConditionGuard ();
+
+ TestReturnTrueGuard ();
+ TestOrGuard ();
+ TestNotGuard ();
+ TestEqualsFalseGuard ();
+ TestFalseEqualsGuard ();
+ TestNotEqualsTrueGuard ();
+ TestTrueNotEqualsGuard ();
+ TestIsNotTrueGuard ();
+ TestIsFalseGuard ();
+ TestIfReturnFalseGuard ();
+ TestElseReturnFalseGuard ();
+ TestAssertNotReturnTrueGuard ();
+ }
+ }
+
+ class InvalidFeatureChecks {
+ [ExpectedWarning ("IL4001", ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static int NonBooleanProperty => 0;
+
+ [ExpectedWarning ("IL2026", nameof (RequiresUnreferencedCodeAttribute))]
+ static void TestNonBooleanProperty ()
+ {
+ if (NonBooleanProperty == 0)
+ RequiresUnreferencedCode ();
+ }
+
+ [ExpectedWarning ("IL4001", ProducedBy = Tool.Analyzer)]
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ bool NonStaticProperty => true;
+
+ [ExpectedWarning ("IL2026", nameof (RequiresUnreferencedCodeAttribute))]
+ static void TestNonStaticProperty ()
+ {
+ var instance = new InvalidFeatureChecks ();
+ if (instance.NonStaticProperty)
+ RequiresUnreferencedCode ();
+ }
+
+ // No warning for this case because we don't validate that the attribute usage matches
+ // the expected AttributeUsage.Property for assemblies that define their own version
+ // of FeatureCheckAttribute.
+ [FeatureCheck (typeof(RequiresUnreferencedCodeAttribute))]
+ static bool Method () => true;
+
+ [ExpectedWarning ("IL2026", nameof (RequiresUnreferencedCodeAttribute))]
+ static void TestMethod ()
+ {
+ if (Method ())
+ RequiresUnreferencedCode ();
+ }
+
+ public static void Test ()
+ {
+ TestNonBooleanProperty ();
+ TestNonStaticProperty ();
+ TestMethod ();
+ }
+ }
+
+ [RequiresDynamicCode (nameof (RequiresDynamicCode))]
+ static void RequiresDynamicCode () { }
+
+ [RequiresUnreferencedCode (nameof (RequiresUnreferencedCode))]
+ static void RequiresUnreferencedCode () { }
+
+ [RequiresAssemblyFiles (nameof (RequiresAssemblyFiles))]
+ static void RequiresAssemblyFiles () { }
+
+ static bool OtherCondition () => true;
+ }
+}
diff --git a/src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/FeatureCheckAttributeDataFlowTestSubstitutions.xml b/src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/FeatureCheckAttributeDataFlowTestSubstitutions.xml
new file mode 100644
index 000000000000..828ef795ae37
--- /dev/null
+++ b/src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/FeatureCheckAttributeDataFlowTestSubstitutions.xml
@@ -0,0 +1,52 @@
+<linker>
+ <assembly fullname="test, Version=0.0.0.0, Culture=neutral, PublicKeyToken=null">
+ <type fullname="Mono.Linker.Tests.Cases.DataFlow.FeatureCheckAttributeDataFlow/DefineFeatureCheck" feature="System.Runtime.CompilerServices.RuntimeFeature.IsDynamicCodeSupported" featurevalue="false">
+ <method signature="System.Boolean get_GuardDynamicCode()" body="stub" value="false" />
+ <method signature="System.Boolean get_GuardDynamicCodeIndirect()" body="stub" value="false" />
+ <method signature="System.Boolean get_GuardDynamicCodeCycle()" body="stub" value="false" />
+ <method signature="System.Boolean get_GuardDynamicCodeCycle2()" body="stub" value="false" />
+ </type>
+ <type fullname="Mono.Linker.Tests.Cases.DataFlow.FeatureCheckAttributeDataFlow/DefineFeatureCheck">
+ <method signature="System.Boolean get_GuardUnreferencedCode()" body="stub" value="false" />
+ <method signature="System.Boolean get_GuardDynamicCodeAndUnreferencedCode()" body="stub" value="false" />
+ <method signature="System.Boolean get_GuardAssemblyFiles()" body="stub" value="false" />
+ </type>
+ <type fullname="Mono.Linker.Tests.Cases.DataFlow.FeatureCheckAttributeDataFlow/ValidGuardBodies">
+ <method signature="System.Boolean get_DirectGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_IndirectGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_ReturnFalseGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_AndGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_NotNotGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_EqualsTrueGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_TrueEqualsGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_NotEqualsFalseGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_FalseNotEqualsGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_IsTrueGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_IsNotFalseGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_IfReturnTrueGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_ElseReturnTrueGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_AssertReturnFalseGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_AssertNotReturnFalseGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_AssertReturnTrueGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_ThrowGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_TernaryIfGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_TernaryElseGuard()" body="stub" value="false" />
+ </type>
+ <type fullname="Mono.Linker.Tests.Cases.DataFlow.FeatureCheckAttributeDataFlow/InvalidGuardBodies">
+ <method signature="System.Boolean get_OtherConditionGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_ReturnTrueGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_OrGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_NotGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_EqualsFalseGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_FalseEqualsGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_NotEqualsTrueGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_TrueNotEqualsGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_IsNotTrueGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_IsFalseGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_IfReturnFalseGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_ElseReturnFalseGuard()" body="stub" value="false" />
+ <method signature="System.Boolean get_AssertNotReturnTrueGuard()" body="stub" value="false" />
+ </type>
+ </assembly>
+</linker>
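The `body="stub" value="false"` substitutions above are what give ILLink/ILCompiler the behavior the analyzer derives from `FeatureCheck`: once a guard property is stubbed to a constant, branch folding removes the guarded call. An illustrative before/after (not part of the diff):

    // As compiled:
    if (GuardUnreferencedCode)
        RequiresUnreferencedCode ();
    // After substitution, the trimmer sees get_GuardUnreferencedCode() return the
    // constant false, so the branch folds away and the guarded call produces no IL2026.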
diff --git a/src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/FeatureCheckDataFlow.cs b/src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/FeatureCheckDataFlow.cs
index 29f18ea70638..d0d236997445 100644
--- a/src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/FeatureCheckDataFlow.cs
+++ b/src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/FeatureCheckDataFlow.cs
@@ -20,7 +20,8 @@ namespace Mono.Linker.Tests.Cases.DataFlow
// Note: the XML must be passed as an embedded resource named ILLink.Substitutions.xml,
// not as a separate substitution file, for it to work with NativeAot.
// Related: https://github.com/dotnet/runtime/issues/88647
- [SetupCompileResource ("FeatureCheckDataFlowTestSubstitutions.xml", "ILLink.Substitutions.xml")]
+ [SetupCompileBefore ("TestFeatures.dll", new[] { "Dependencies/TestFeatures.cs" },
+ resources: new object[] { new [] { "FeatureCheckDataFlowTestSubstitutions.xml", "ILLink.Substitutions.xml" } })]
[IgnoreSubstitutions (false)]
public class FeatureCheckDataFlow
{
@@ -525,14 +526,14 @@ namespace Mono.Linker.Tests.Cases.DataFlow
RequiresUnreferencedCode ();
}
- static void CallTestRequiresDynamicCodeGuarded ()
+ static void CallTestDynamicCodeGuarded ()
{
if (RuntimeFeature.IsDynamicCodeSupported)
RequiresDynamicCode ();
}
[ExpectedWarning ("IL3050", nameof (RequiresDynamicCode), ProducedBy = Tool.Analyzer | Tool.NativeAot)]
- static void CallTestRequiresDynamicCodeUnguarded ()
+ static void CallTestDynamicCodeUnguarded ()
{
RequiresDynamicCode ();
}
@@ -554,8 +555,8 @@ namespace Mono.Linker.Tests.Cases.DataFlow
{
CallTestUnreferencedCodeGuarded ();
CallTestUnreferencedCodeUnguarded ();
- CallTestRequiresDynamicCodeGuarded ();
- CallTestRequiresDynamicCodeUnguarded ();
+ CallTestDynamicCodeGuarded ();
+ CallTestDynamicCodeUnguarded ();
CallTestAssemblyFilesGuarded ();
CallTestAssemblyFilesUnguarded ();
}
@@ -1220,12 +1221,3 @@ namespace Mono.Linker.Tests.Cases.DataFlow
class RequiresAllGeneric<[DynamicallyAccessedMembers (DynamicallyAccessedMemberTypes.All)] T> {}
}
}
-
-namespace ILLink.RoslynAnalyzer
-{
- class TestFeatures
- {
- public static bool IsUnreferencedCodeSupported => true;
- public static bool IsAssemblyFilesSupported => true;
- }
-}
diff --git a/src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/FeatureCheckDataFlowTestSubstitutions.xml b/src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/FeatureCheckDataFlowTestSubstitutions.xml
index c096cf07d6e7..db0bf3703367 100644
--- a/src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/FeatureCheckDataFlowTestSubstitutions.xml
+++ b/src/tools/illink/test/Mono.Linker.Tests.Cases/DataFlow/FeatureCheckDataFlowTestSubstitutions.xml
@@ -1,5 +1,5 @@
<linker>
- <assembly fullname="test, Version=0.0.0.0, Culture=neutral, PublicKeyToken=null">
+ <assembly fullname="TestFeatures, Version=0.0.0.0, Culture=neutral, PublicKeyToken=null">
<type fullname="ILLink.RoslynAnalyzer.TestFeatures">
<method signature="System.Boolean get_IsUnreferencedCodeSupported()" body="stub" value="false" />
<method signature="System.Boolean get_IsAssemblyFilesSupported()" body="stub" value="false" />
diff --git a/src/tools/illink/test/Mono.Linker.Tests.Cases/Inheritance.Interfaces/Dependencies/InterfaceImplementedThroughBaseInterface.il b/src/tools/illink/test/Mono.Linker.Tests.Cases/Inheritance.Interfaces/Dependencies/InterfaceImplementedThroughBaseInterface.il
new file mode 100644
index 000000000000..61080f8b7d06
--- /dev/null
+++ b/src/tools/illink/test/Mono.Linker.Tests.Cases/Inheritance.Interfaces/Dependencies/InterfaceImplementedThroughBaseInterface.il
@@ -0,0 +1,48 @@
+// Copyright (c) .NET Foundation and contributors. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
+.assembly extern mscorlib { }
+
+.assembly 'library' { }
+
+.class interface public auto ansi abstract beforefieldinit IBase
+{
+ // Methods
+ .method public hidebysig newslot abstract virtual
+ instance void M () cil managed
+ {
+ } // end of method IBase::M
+
+} // end of class IBase
+
+.class interface public auto ansi abstract beforefieldinit IDerived
+ implements IBase
+{
+} // end of class IDerived
+
+.class public auto ansi beforefieldinit C
+ extends [System.Runtime]System.Object
+ implements IDerived
+{
+ // Methods
+ .method private final hidebysig newslot virtual
+ instance void IBase.M () cil managed
+ {
+ .override method instance void IBase::M()
+ // Method begins at RVA 0x2050
+ // Code size 2 (0x2)
+ .maxstack 8
+
+ IL_0000: nop
+ IL_0001: ret
+ } // end of method C::IBase.M
+
+ .method public hidebysig specialname rtspecialname
+ instance void .ctor () cil managed
+ {
+ // Method begins at RVA 0x2053
+ // Code size 8 (0x8)
+ .maxstack 8
+
+ IL_0000: ldarg.0
+ IL_0001: call instance void [System.Runtime]System.Object::.ctor()
+ IL_0006: nop
+ IL_0007: ret
+ } // end of method C::.ctor
+}
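Roughly the C# equivalent of the IL above, shown only for readability; the test uses hand-written IL because Roslyn would also emit a direct `implements IBase` entry on `C`, whereas the test needs `IBase` to be reachable only through `IDerived`:

    interface IBase { void M (); }
    interface IDerived : IBase { }
    class C : IDerived
    {
        void IBase.M () { }    // must be kept via the C -> IDerived -> IBase path
    }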
diff --git a/src/tools/illink/test/Mono.Linker.Tests.Cases/Inheritance.Interfaces/InterfaceImplementedThroughBaseInterface.cs b/src/tools/illink/test/Mono.Linker.Tests.Cases/Inheritance.Interfaces/InterfaceImplementedThroughBaseInterface.cs
new file mode 100644
index 000000000000..e701fb9c28ba
--- /dev/null
+++ b/src/tools/illink/test/Mono.Linker.Tests.Cases/Inheritance.Interfaces/InterfaceImplementedThroughBaseInterface.cs
@@ -0,0 +1,34 @@
+// Copyright (c) .NET Foundation and contributors. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+using Mono.Linker.Tests.Cases.Expectations.Assertions;
+using Mono.Linker.Tests.Cases.Expectations.Metadata;
+
+namespace Mono.Linker.Tests.Cases.Inheritance.Interfaces
+{
+ [SetupLinkerArgument ("--skip-unresolved", "true")]
+ [SetupLinkerArgument ("-a", "test.exe", "library")]
+ [SetupLinkerArgument ("-a", "library.dll", "library")]
+ [TestCaseRequirements (TestRunCharacteristics.SupportsDefaultInterfaceMethods, "Requires support for default interface methods")]
+ [Define ("IL_ASSEMBLY_AVAILABLE")]
+ [SetupCompileBefore ("library.dll", new[] { "Dependencies/InterfaceImplementedThroughBaseInterface.il" })]
+ [SkipILVerify]
+
+#if IL_ASSEMBLY_AVAILABLE
+ [KeptMemberInAssembly ("library.dll", typeof(C), "IBase.M()")]
+#endif
+ [KeptMember(".ctor()")]
+ public class InterfaceImplementedThroughBaseInterface
+ {
+ public static void Main ()
+ {
+ }
+ }
+}