This is the branch thicket of patches in Git for Windows that are considered ready for upstream. To keep them in a ready-to-submit shape, they are kept as close to the beginning of the branch thicket as possible.
@@ -0,0 +1,76 @@
name: Windows Nano Server tests

on:
  workflow_dispatch:

env:
  DEVELOPER: 1

jobs:
  test-nano-server:
    runs-on: windows-2022
    env:
      WINDBG_DIR: "C:/Program Files (x86)/Windows Kits/10/Debuggers/x64"
      IMAGE: mcr.microsoft.com/powershell:nanoserver-ltsc2022

    steps:
      - uses: actions/checkout@v4
      - uses: git-for-windows/setup-git-for-windows-sdk@v1
      - name: build Git
        shell: bash
        run: make -j15
      - name: pull nanoserver image
        shell: bash
        run: docker pull $IMAGE
      - name: run nano-server test
        shell: bash
        run: |
          docker run \
            --user "ContainerAdministrator" \
            -v "$WINDBG_DIR:C:/dbg" \
            -v "$(cygpath -aw /mingw64/bin):C:/mingw64-bin" \
            -v "$(cygpath -aw .):C:/test" \
            $IMAGE pwsh.exe -Command '
              # Extend the PATH to include the `.dll` files in /mingw64/bin/
              $env:PATH += ";C:\mingw64-bin"
              # For each executable to test, pick some no-operation set of
              # flags/subcommands or something that should quickly result in an
              # error with a known exit code that is not a negative 32-bit
              # number, and set the expected return code appropriately.
              #
              # Only test executables that could be expected to run in a
              # UI-less environment.
              #
              # ( Executable path, arguments, expected return code )
              # Also note that a space is required before the closing
              # parenthesis (a PowerShell quirk when defining nested arrays
              # like this).
              $executables_to_test = @(
                ("C:\test\git.exe", "", 1 ),
                ("C:\test\scalar.exe", "version", 0 )
              )
              foreach ($executable in $executables_to_test)
              {
                Write-Output "Now testing $($executable[0])"
                &$executable[0] $executable[1]
                if ($LASTEXITCODE -ne $executable[2]) {
                  # If we failed, run the debugger to find out what function
                  # or DLL could not be found and then exit the script with
                  # failure. The missing DLL or EXE will be referenced near
                  # the end of the output.
                  # Set a flag to have the debugger show loader stub
                  # diagnostics. This requires running as administrator,
                  # otherwise the flag will be ignored.
                  C:\dbg\gflags -i $executable[0] +SLS
                  C:\dbg\cdb.exe -c "g" -c "q" $executable[0] $executable[1]
                  exit 1
                }
              }
              exit 0
            '
@@ -0,0 +1,5 @@
sendpack.sideband::
	Allows disabling the side-band-64k capability for send-pack even
	when it is advertised by the server. Makes it possible to work
	around a limitation in the Git for Windows implementation together
	with the dumb git protocol. Defaults to true.
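+
For example, the capability could be disabled for a single push like this
(the remote and branch names are illustrative):
+
----
git -c sendpack.sideband=false push origin main
----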
@@ -0,0 +1,14 @@
survey.*::
	These variables adjust the default behavior of the `git survey`
	command. The intention is that this command could be run in the
	background with these options.
+
--
verbose::
	This boolean value implies the `--[no-]verbose` option.
progress::
	This boolean value implies the `--[no-]progress` option.
top::
	This integer value implies `--top=<N>`, specifying the
	number of entries in the detail tables.
--
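As an illustration, these could be set as follows (the values shown are
arbitrary):

----
git config survey.verbose true
git config survey.progress false
git config survey.top 20
----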
@@ -0,0 +1,4 @@
windows.appendAtomically::
	By default, the atomic append API is used on Windows. However, it
	works only with local disk files; if you are working on a network
	file system, you should set this to false to turn it off.
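+
For instance, a repository stored on a network file system could turn the
feature off like this:
+
----
git config windows.appendAtomically false
----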
@@ -0,0 +1,60 @@
git-backfill(1)
===============

NAME
----
git-backfill - Download missing objects in a partial clone

SYNOPSIS
--------
[verse]
(EXPERIMENTAL) 'git backfill' [--batch-size=<n>] [--[no-]sparse]

DESCRIPTION
-----------

Blobless partial clones are created using `git clone --filter=blob:none`,
which then configures the local repository such that the Git client avoids
downloading blob objects unless they are required for a local operation.
This initially means that the clone and later fetches download reachable
commits and trees but no blobs. Later operations that change the `HEAD`
pointer, such as `git checkout` or `git merge`, may need to download
missing blobs in order to complete their operation.

In the worst cases, commands that compute blob diffs, such as `git blame`,
become very slow as they download the missing blobs in single-blob
requests to satisfy the missing object as the Git command needs it. This
leads to multiple download requests and no ability for the Git server to
provide delta compression across those objects.

The `git backfill` command provides a way for the user to request that
Git downloads the missing blobs (with optional filters) such that the
missing blobs representing historical versions of files can be downloaded
in batches. The `backfill` command attempts to optimize the request by
grouping blobs that appear at the same path, hopefully leading to good
delta compression in the packfile sent by the server.

By default, `git backfill` downloads all blobs reachable from the `HEAD`
commit. This set can be restricted or expanded using various options.

OPTIONS
-------

--batch-size=<n>::
	Specify a minimum size for a batch of missing objects to request
	from the server. This size may be exceeded by the last set of
	blobs seen at a given path. Default batch size is 16,000.

--[no-]sparse::
	Only download objects if they appear at a path that matches the
	current sparse-checkout. If the sparse-checkout feature is enabled,
	then `--sparse` is assumed and can be disabled with `--no-sparse`.
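EXAMPLES
--------

For illustration, a typical session on a blobless partial clone might look
like this (the repository URL and batch size are placeholders):

----
git clone --filter=blob:none https://example.com/repo.git
cd repo
git backfill --batch-size=32000
----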
SEE ALSO
--------
linkgit:git-clone[1].

GIT
---
Part of the linkgit:git[1] suite
@@ -0,0 +1,83 @@
git-survey(1)
=============

NAME
----
git-survey - EXPERIMENTAL: Measure various repository dimensions of scale

SYNOPSIS
--------
[verse]
(EXPERIMENTAL!) 'git survey' <options>

DESCRIPTION
-----------

Survey the repository and measure various dimensions of scale.

As repositories grow to "monorepo" size, certain data shapes can cause
performance problems. `git-survey` attempts to measure and report on
known problem areas.

Ref Selection and Reachable Objects
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In this first analysis phase, `git survey` will iterate over the set of
requested branches, tags, and other refs and treewalk over all of the
reachable commits, trees, and blobs and generate various statistics.

OPTIONS
-------

--progress::
	Show progress. This is automatically enabled when interactive.

Ref Selection
~~~~~~~~~~~~~

The following options control the set of refs that `git survey` will examine.
By default, `git survey` will look at tags, local branches, and remote refs.
If any of the following options are given, the default set is cleared and
only refs for the given options are added.

--all-refs::
	Use all refs. This includes local branches, tags, remote refs,
	notes, and stashes. This option overrides all of the following.

--branches::
	Add local branches (`refs/heads/`) to the set.

--tags::
	Add tags (`refs/tags/`) to the set.

--remotes::
	Add remote branches (`refs/remotes/`) to the set.

--detached::
	Add HEAD to the set.

--other::
	Add notes (`refs/notes/`) and stashes (`refs/stash/`) to the set.
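For example, to survey only the local branches and tags of the current
repository, with progress output (flag spellings taken from the list above):

----
git survey --progress --branches --tags
----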
OUTPUT
------

By default, `git survey` will print information about the repository in a
human-readable format that includes overviews and tables.

References Summary
~~~~~~~~~~~~~~~~~~

The references summary includes a count of each kind of reference,
including branches, remote refs, and tags (split by "all" and
"annotated").

Reachable Object Summary
~~~~~~~~~~~~~~~~~~~~~~~~

The reachable object summary shows the total number of each kind of Git
object, including tags, commits, trees, and blobs.

GIT
---
Part of the linkgit:git[1] suite
@@ -0,0 +1,82 @@
Path-Walk API
=============

The path-walk API is used to walk reachable objects, but to visit objects
in batches based on a common path they appear in, or by type.

For example, all reachable commits are visited in a group. All tags are
visited in a group. Then, all root trees are visited. At some point, all
blobs reachable via a path `my/dir/to/A` are visited. When there are
multiple paths possible to reach the same object, then only one of those
paths is used to visit the object.

When walking a range of commits with some `UNINTERESTING` objects, the
objects with the `UNINTERESTING` flag are included in these batches. In
order to walk `UNINTERESTING` objects, the `--boundary` option must be
used in the commit walk in order to visit `UNINTERESTING` commits.

Basics
------

To use the path-walk API, include `path-walk.h` and call
`walk_objects_by_path()` with a customized `path_walk_info` struct. The
struct is used to set all of the options for how the walk should proceed.
Let's dig into the different options and their use.

`path_fn` and `path_fn_data`::
	The most important option is the `path_fn` option, which is a
	function pointer to the callback that can execute logic on the
	object IDs for objects grouped by type and path. This function
	also receives a `data` value that corresponds to the
	`path_fn_data` member, for providing custom data structures to
	this callback function.

`revs`::
	To configure the exact details of the reachable set of objects,
	use the `revs` member and initialize it using the revision
	machinery in `revision.h`. Initialize `revs` using calls such as
	`setup_revisions()` or `parse_revision_opt()`. Do not call
	`prepare_revision_walk()`, as that will be called within
	`walk_objects_by_path()`.
+
It is also important that you do not specify the `--objects` flag for the
`revs` struct. The revision walk should only be used to walk commits, and
the objects will be walked in a separate way based on those starting
commits.
+
If you want the path-walk API to emit `UNINTERESTING` objects based on the
commit walk's boundary, be sure to set `revs.boundary` so the boundary
commits are emitted.

`commits`, `blobs`, `trees`, `tags`::
	By default, these members are enabled and signal that the path-walk
	API should call the `path_fn` on objects of these types. Specialized
	applications could disable some options to make it simpler to walk
	the objects or to have fewer calls to `path_fn`.
+
While it is possible to walk only commits in this way, consumers would be
better off using the revision walk API instead.

`prune_all_uninteresting`::
	By default, all reachable paths are emitted by the path-walk API.
	This option allows consumers to declare that they are not
	interested in paths where all included objects are marked with the
	`UNINTERESTING` flag. This requires using the `boundary` option in
	the revision walk so that the walk emits commits marked with the
	`UNINTERESTING` flag.

`pl`::
	This pattern list pointer allows focusing the path-walk search to
	a set of patterns, only emitting paths that match the given
	patterns. See linkgit:gitignore[5] or
	linkgit:git-sparse-checkout[1] for details about pattern lists.
	When the pattern list uses cone-mode patterns, then the path-walk
	API can prune the set of paths it walks to improve performance.
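Putting these options together, here is a minimal sketch of a consumer that
counts the objects visited in each batch. It follows the callback shape used
by `builtin/backfill.c`; the names `count_group` and `walk_example` are
illustrative, and error handling is omitted:

----
static int count_group(const char *path, struct oid_array *list,
		       enum object_type type, void *data)
{
	/* Accumulate a running total via the path_fn_data pointer. */
	size_t *total = data;

	*total += list->nr;
	printf("%s (type %d): %"PRIuMAX" objects\n",
	       path, type, (uintmax_t)list->nr);
	return 0;
}

static int walk_example(struct repository *repo)
{
	struct rev_info revs;
	struct path_walk_info info = PATH_WALK_INFO_INIT;
	size_t total = 0;
	int ret;

	/* Start the walk from HEAD; note: no --objects flag. */
	repo_init_revisions(repo, &revs, "");
	handle_revision_arg("HEAD", &revs, 0, 0);

	info.revs = &revs;
	info.path_fn = count_group;
	info.path_fn_data = &total;

	ret = walk_objects_by_path(&info);
	release_revisions(&revs);
	return ret;
}
----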
Examples
--------

See example usages in:
`t/helper/test-path-walk.c`,
`builtin/backfill.c`,
`builtin/pack-objects.c`
@@ -0,0 +1,393 @@
variables:
  Agent.Source.Git.ShallowFetchDepth: 1
  GIT_CONFIG_PARAMETERS: "'checkout.workers=56' 'user.name=CI' 'user.email=ci@git'"

jobs:
- job: windows_build
  displayName: Windows Build
  condition: succeeded()
  pool:
    vmImage: windows-latest
  timeoutInMinutes: 240
  steps:
  - bash: git clone --bare --depth=1 --filter=blob:none --single-branch -b main https://github.com/git-for-windows/git-sdk-64
    displayName: 'clone git-sdk-64'
  - bash: git clone --depth=1 --single-branch -b main https://github.com/git-for-windows/build-extra
    displayName: 'clone build-extra'
  - bash: sh -x ./build-extra/please.sh create-sdk-artifact --sdk=git-sdk-64.git --out=git-sdk-64-minimal minimal-sdk
    displayName: 'build git-sdk-64-minimal-sdk'
  - bash: |
      # Let Git ignore the SDK and the test-cache
      printf "%s\n" /git-sdk-64.git/ /build-extra/ /git-sdk-64-minimal/ /test-cache/ >>'.git/info/exclude'
    displayName: 'Ignore untracked directories'
  - bash: ci/make-test-artifacts.sh artifacts
    displayName: Build
    env:
      HOME: $(Build.SourcesDirectory)
      MSYSTEM: MINGW64
      DEVELOPER: 1
      NO_PERL: 1
      PATH: "$(Build.SourcesDirectory)\\git-sdk-64-minimal\\mingw64\\bin;$(Build.SourcesDirectory)\\git-sdk-64-minimal\\usr\\bin;C:\\Windows\\system32;C:\\Windows;C:\\Windows\\system32\\wbem"
  - task: PublishPipelineArtifact@0
    displayName: 'Publish Pipeline Artifact: test artifacts'
    inputs:
      artifactName: 'windows-artifacts'
      targetPath: '$(Build.SourcesDirectory)\artifacts'
  - task: PublishPipelineArtifact@0
    displayName: 'Publish Pipeline Artifact: git-sdk-64-minimal'
    inputs:
      artifactName: 'git-sdk-64-minimal'
      targetPath: '$(Build.SourcesDirectory)\git-sdk-64-minimal'

- job: windows_test
  displayName: Windows Test
  dependsOn: windows_build
  condition: succeeded()
  pool:
    vmImage: windows-latest
  timeoutInMinutes: 240
  strategy:
    parallel: 10
  steps:
  - task: DownloadPipelineArtifact@0
    displayName: 'Download Pipeline Artifact: test artifacts'
    inputs:
      artifactName: 'windows-artifacts'
      targetPath: '$(Build.SourcesDirectory)'
  - task: DownloadPipelineArtifact@0
    displayName: 'Download Pipeline Artifact: git-sdk-64-minimal'
    inputs:
      artifactName: 'git-sdk-64-minimal'
      targetPath: '$(Build.SourcesDirectory)\git-sdk-64-minimal'
  - bash: |
      test -f artifacts.tar.gz || {
        echo No test artifacts found\; skipping >&2
        exit 0
      }
      tar xf artifacts.tar.gz || exit 1
      # Let Git ignore the SDK and the test-cache
      printf '%s\n' /git-sdk-64.git/ /build-extra/ /git-sdk-64-minimal/ /test-cache/ >>.git/info/exclude
      ci/run-test-slice.sh $SYSTEM_JOBPOSITIONINPHASE $SYSTEM_TOTALJOBSINPHASE || {
        ci/print-test-failures.sh
        exit 1
      }
    displayName: 'Test (parallel)'
    env:
      HOME: $(Build.SourcesDirectory)
      MSYSTEM: MINGW64
      NO_SVN_TESTS: 1
      GIT_TEST_SKIP_REBASE_P: 1
      PATH: "$(Build.SourcesDirectory)\\git-sdk-64-minimal\\mingw64\\bin;$(Build.SourcesDirectory)\\git-sdk-64-minimal\\usr\\bin\\core_perl;$(Build.SourcesDirectory)\\git-sdk-64-minimal\\usr\\bin;C:\\Windows\\system32;C:\\Windows;C:\\Windows\\system32\\wbem"
  - task: PublishTestResults@2
    displayName: 'Publish Test Results **/TEST-*.xml'
    inputs:
      mergeTestResults: true
      testRunTitle: 'windows'
      platform: Windows
      publishRunAttachments: false
    condition: succeededOrFailed()
  - task: PublishBuildArtifacts@1
    displayName: 'Publish trash directories of failed tests'
    condition: failed()
    inputs:
      PathtoPublish: t/failed-test-artifacts
      ArtifactName: failed-test-artifacts

- job: vs_build
  displayName: Visual Studio Build
  condition: succeeded()
  pool:
    vmImage: windows-latest
  timeoutInMinutes: 240
  steps:
  - bash: git clone --bare --depth=1 --filter=blob:none --single-branch -b main https://github.com/git-for-windows/git-sdk-64
    displayName: 'clone git-sdk-64'
  - bash: git clone --depth=1 --single-branch -b main https://github.com/git-for-windows/build-extra
    displayName: 'clone build-extra'
  - bash: sh -x ./build-extra/please.sh create-sdk-artifact --sdk=git-sdk-64.git --out=git-sdk-64-minimal minimal-sdk
    displayName: 'build git-sdk-64-minimal-sdk'
  - bash: |
      # Let Git ignore the SDK and the test-cache
      printf "%s\n" /git-sdk-64-minimal/ /test-cache/ >>'.git/info/exclude'
    displayName: 'Ignore untracked directories'
  - bash: make NDEBUG=1 DEVELOPER=1 vcxproj
    displayName: Generate Visual Studio Solution
    env:
      HOME: $(Build.SourcesDirectory)
      MSYSTEM: MINGW64
      DEVELOPER: 1
      NO_PERL: 1
      PATH: "$(Build.SourcesDirectory)\\git-sdk-64-minimal\\mingw64\\bin;$(Build.SourcesDirectory)\\git-sdk-64-minimal\\usr\\bin;C:\\Windows\\system32;C:\\Windows;C:\\Windows\\system32\\wbem"
  - powershell: |
      $urlbase = "https://dev.azure.com/git/git/_apis/build/builds"
      $id = ((Invoke-WebRequest -UseBasicParsing "${urlbase}?definitions=9&statusFilter=completed&resultFilter=succeeded&`$top=1").content | ConvertFrom-JSON).value[0].id
      $downloadUrl = ((Invoke-WebRequest -UseBasicParsing "${urlbase}/$id/artifacts").content | ConvertFrom-JSON).value[0].resource.downloadUrl
      (New-Object Net.WebClient).DownloadFile($downloadUrl, "compat.zip")
      Expand-Archive compat.zip -DestinationPath . -Force
      Remove-Item compat.zip
    displayName: 'Download vcpkg artifacts'
  - task: MSBuild@1
    inputs:
      solution: git.sln
      platform: x64
      configuration: Release
      maximumCpuCount: 4
      msbuildArguments: /p:PlatformToolset=v142
  - bash: |
      ./compat/vcbuild/vcpkg_copy_dlls.bat release &&
      mkdir -p artifacts &&
      eval "$(make -n artifacts-tar INCLUDE_DLLS_IN_ARTIFACTS=YesPlease ARTIFACTS_DIRECTORY=artifacts | grep ^tar)"
    displayName: Bundle artifact tar
    env:
      HOME: $(Build.SourcesDirectory)
      MSYSTEM: MINGW64
      DEVELOPER: 1
      NO_PERL: 1
      MSVC: 1
      VCPKG_ROOT: $(Build.SourcesDirectory)\compat\vcbuild\vcpkg
      PATH: "$(Build.SourcesDirectory)\\git-sdk-64-minimal\\mingw64\\bin;$(Build.SourcesDirectory)\\git-sdk-64-minimal\\usr\\bin;C:\\Windows\\system32;C:\\Windows;C:\\Windows\\system32\\wbem"
  - powershell: |
      $tag = (Invoke-WebRequest -UseBasicParsing "https://gitforwindows.org/latest-tag.txt").content
      $version = (Invoke-WebRequest -UseBasicParsing "https://gitforwindows.org/latest-version.txt").content
      $url = "https://github.com/git-for-windows/git/releases/download/${tag}/PortableGit-${version}-64-bit.7z.exe"
      (New-Object Net.WebClient).DownloadFile($url,"PortableGit.exe")
      & .\PortableGit.exe -y -oartifacts\PortableGit
      # Wait until it is unpacked
      while (-not @(Remove-Item -ErrorAction SilentlyContinue PortableGit.exe; $?)) { sleep 1 }
    displayName: Download & extract portable Git
  - task: PublishPipelineArtifact@0
    displayName: 'Publish Pipeline Artifact: MSVC test artifacts'
    inputs:
      artifactName: 'vs-artifacts'
      targetPath: '$(Build.SourcesDirectory)\artifacts'

- job: vs_test
  displayName: Visual Studio Test
  dependsOn: vs_build
  condition: succeeded()
  pool:
    vmImage: windows-latest
  timeoutInMinutes: 240
  strategy:
    parallel: 10
  steps:
  - task: DownloadPipelineArtifact@0
    displayName: 'Download Pipeline Artifact: VS test artifacts'
    inputs:
      artifactName: 'vs-artifacts'
      targetPath: '$(Build.SourcesDirectory)'
  - bash: |
      test -f artifacts.tar.gz || {
        echo No test artifacts found\; skipping >&2
        exit 0
      }
      tar xf artifacts.tar.gz || exit 1
      # Let Git ignore the SDK and the test-cache
      printf '%s\n' /PortableGit/ /test-cache/ >>.git/info/exclude
      cd t &&
      PATH="$PWD/helper:$PATH" &&
      test-tool.exe run-command testsuite --jobs=10 -V -x --write-junit-xml \
        $(test-tool.exe path-utils slice-tests \
          $SYSTEM_JOBPOSITIONINPHASE $SYSTEM_TOTALJOBSINPHASE t[0-9]*.sh)
    displayName: 'Test (parallel)'
    env:
      HOME: $(Build.SourcesDirectory)
      MSYSTEM: MINGW64
      NO_SVN_TESTS: 1
      GIT_TEST_SKIP_REBASE_P: 1
      PATH: "$(Build.SourcesDirectory)\\PortableGit\\mingw64\\bin;$(Build.SourcesDirectory)\\PortableGit\\usr\\bin;C:\\Windows\\system32;C:\\Windows;C:\\Windows\\system32\\wbem"
  - task: PublishTestResults@2
    displayName: 'Publish Test Results **/TEST-*.xml'
    inputs:
      mergeTestResults: true
      testRunTitle: 'vs'
      platform: Windows
      publishRunAttachments: false
    condition: succeededOrFailed()
  - task: PublishBuildArtifacts@1
    displayName: 'Publish trash directories of failed tests'
    condition: failed()
    inputs:
      PathtoPublish: t/failed-test-artifacts
      ArtifactName: failed-vs-test-artifacts

- job: linux_clang
  displayName: linux-clang
  condition: succeeded()
  pool:
    vmImage: ubuntu-latest
  steps:
  - bash: |
      export CC=clang || exit 1
      ci/install-dependencies.sh || exit 1
      ci/run-build-and-tests.sh || {
        ci/print-test-failures.sh
        exit 1
      }
    displayName: 'ci/run-build-and-tests.sh'
  - task: PublishTestResults@2
    displayName: 'Publish Test Results **/TEST-*.xml'
    inputs:
      mergeTestResults: true
      testRunTitle: 'linux-clang'
      platform: Linux
      publishRunAttachments: false
    condition: succeededOrFailed()
  - task: PublishBuildArtifacts@1
    displayName: 'Publish trash directories of failed tests'
    condition: failed()
    inputs:
      PathtoPublish: t/failed-test-artifacts
      ArtifactName: failed-test-artifacts

- job: linux_gcc
  displayName: linux-gcc
  condition: succeeded()
  pool:
    vmImage: ubuntu-latest
  steps:
  - bash: |
      ci/install-dependencies.sh || exit 1
      ci/run-build-and-tests.sh || {
        ci/print-test-failures.sh
        exit 1
      }
    displayName: 'ci/run-build-and-tests.sh'
  - task: PublishTestResults@2
    displayName: 'Publish Test Results **/TEST-*.xml'
    inputs:
      mergeTestResults: true
      testRunTitle: 'linux-gcc'
      platform: Linux
      publishRunAttachments: false
    condition: succeededOrFailed()
  - task: PublishBuildArtifacts@1
    displayName: 'Publish trash directories of failed tests'
    condition: failed()
    inputs:
      PathtoPublish: t/failed-test-artifacts
      ArtifactName: failed-test-artifacts

- job: osx_clang
  displayName: osx-clang
  condition: succeeded()
  pool:
    vmImage: macOS-latest
  steps:
  - bash: |
      export CC=clang
      ci/install-dependencies.sh || exit 1
      ci/run-build-and-tests.sh || {
        ci/print-test-failures.sh
        exit 1
      }
    displayName: 'ci/run-build-and-tests.sh'
  - task: PublishTestResults@2
    displayName: 'Publish Test Results **/TEST-*.xml'
    inputs:
      mergeTestResults: true
      testRunTitle: 'osx-clang'
      platform: macOS
      publishRunAttachments: false
    condition: succeededOrFailed()
  - task: PublishBuildArtifacts@1
    displayName: 'Publish trash directories of failed tests'
    condition: failed()
    inputs:
      PathtoPublish: t/failed-test-artifacts
      ArtifactName: failed-test-artifacts

- job: osx_gcc
  displayName: osx-gcc
  condition: succeeded()
  pool:
    vmImage: macOS-latest
  steps:
  - bash: |
      ci/install-dependencies.sh || exit 1
      ci/run-build-and-tests.sh || {
        ci/print-test-failures.sh
        exit 1
      }
    displayName: 'ci/run-build-and-tests.sh'
  - task: PublishTestResults@2
    displayName: 'Publish Test Results **/TEST-*.xml'
    inputs:
      mergeTestResults: true
      testRunTitle: 'osx-gcc'
      platform: macOS
      publishRunAttachments: false
    condition: succeededOrFailed()
  - task: PublishBuildArtifacts@1
    displayName: 'Publish trash directories of failed tests'
    condition: failed()
    inputs:
      PathtoPublish: t/failed-test-artifacts
      ArtifactName: failed-test-artifacts

- job: linux32
  displayName: Linux32
  condition: succeeded()
  pool:
    vmImage: ubuntu-latest
  steps:
  - bash: |
      res=0
      sudo AGENT_OS="$AGENT_OS" BUILD_BUILDNUMBER="$BUILD_BUILDNUMBER" BUILD_REPOSITORY_URI="$BUILD_REPOSITORY_URI" BUILD_SOURCEBRANCH="$BUILD_SOURCEBRANCH" BUILD_SOURCEVERSION="$BUILD_SOURCEVERSION" SYSTEM_PHASENAME="$SYSTEM_PHASENAME" SYSTEM_TASKDEFINITIONSURI="$SYSTEM_TASKDEFINITIONSURI" SYSTEM_TEAMPROJECT="$SYSTEM_TEAMPROJECT" CC=$CC MAKEFLAGS="$MAKEFLAGS" jobname=linux32 bash -lxc ci/run-docker.sh || res=1
      sudo chmod a+r t/out/TEST-*.xml
      test ! -d t/failed-test-artifacts || sudo chmod a+r t/failed-test-artifacts
      exit $res
    displayName: 'jobname=linux32 ci/run-docker.sh'
  - task: PublishTestResults@2
    displayName: 'Publish Test Results **/TEST-*.xml'
    inputs:
      mergeTestResults: true
      testRunTitle: 'linux32'
      platform: Linux
      publishRunAttachments: false
    condition: succeededOrFailed()
  - task: PublishBuildArtifacts@1
    displayName: 'Publish trash directories of failed tests'
    condition: failed()
    inputs:
      PathtoPublish: t/failed-test-artifacts
      ArtifactName: failed-test-artifacts

- job: static_analysis
  displayName: StaticAnalysis
  condition: succeeded()
  pool:
    vmImage: ubuntu-22.04
  steps:
  - bash: |
      sudo apt-get update &&
      sudo apt-get install -y coccinelle libcurl4-openssl-dev libssl-dev libexpat-dev gettext &&
      export jobname=StaticAnalysis &&
      ci/run-static-analysis.sh || exit 1
    displayName: 'ci/run-static-analysis.sh'

- job: documentation
  displayName: Documentation
  condition: succeeded()
  pool:
    vmImage: ubuntu-latest
  steps:
  - bash: |
      sudo apt-get update &&
      sudo apt-get install -y asciidoc xmlto asciidoctor docbook-xsl-ns &&
      export ALREADY_HAVE_ASCIIDOCTOR=yes. &&
      export jobname=Documentation &&
      ci/test-documentation.sh || exit 1
    displayName: 'ci/test-documentation.sh'
@@ -0,0 +1,154 @@
#define USE_THE_REPOSITORY_VARIABLE /* for core_apply_sparse_checkout */

#include "builtin.h"
#include "git-compat-util.h"
#include "config.h"
#include "parse-options.h"
#include "repository.h"
#include "commit.h"
#include "dir.h"
#include "environment.h"
#include "hex.h"
#include "tree.h"
#include "tree-walk.h"
#include "object.h"
#include "object-store-ll.h"
#include "oid-array.h"
#include "oidset.h"
#include "promisor-remote.h"
#include "strmap.h"
#include "string-list.h"
#include "revision.h"
#include "trace2.h"
#include "progress.h"
#include "packfile.h"
#include "path-walk.h"

static const char * const builtin_backfill_usage[] = {
	N_("(EXPERIMENTAL) git backfill [--batch-size=<n>] [--[no-]sparse]"),
	NULL
};

struct backfill_context {
	struct repository *repo;
	struct oid_array current_batch;
	size_t batch_size;
	int sparse;
};

static void clear_backfill_context(struct backfill_context *ctx)
{
	oid_array_clear(&ctx->current_batch);
}

static void download_batch(struct backfill_context *ctx)
{
	promisor_remote_get_direct(ctx->repo,
				   ctx->current_batch.oid,
				   ctx->current_batch.nr);
	oid_array_clear(&ctx->current_batch);

	/*
	 * We likely have a new packfile. Add it to the packed list to
	 * avoid possible duplicate downloads of the same objects.
	 */
	reprepare_packed_git(ctx->repo);
}

static int fill_missing_blobs(const char *path UNUSED,
			      struct oid_array *list,
			      enum object_type type,
			      void *data)
{
	struct backfill_context *ctx = data;

	if (type != OBJ_BLOB)
		return 0;

	for (size_t i = 0; i < list->nr; i++) {
		off_t size = 0;
		struct object_info info = OBJECT_INFO_INIT;
		info.disk_sizep = &size;
		if (oid_object_info_extended(ctx->repo,
					     &list->oid[i],
					     &info,
					     OBJECT_INFO_FOR_PREFETCH) ||
		    !size)
			oid_array_append(&ctx->current_batch, &list->oid[i]);
	}

	if (ctx->current_batch.nr >= ctx->batch_size)
		download_batch(ctx);

	return 0;
}

static int do_backfill(struct backfill_context *ctx)
{
	struct rev_info revs;
	struct path_walk_info info = PATH_WALK_INFO_INIT;
	int ret;

	if (ctx->sparse) {
		CALLOC_ARRAY(info.pl, 1);
		if (get_sparse_checkout_patterns(info.pl)) {
			clear_pattern_list(info.pl);
			free(info.pl);
			return error(_("problem loading sparse-checkout"));
		}
	}

	repo_init_revisions(ctx->repo, &revs, "");
	handle_revision_arg("HEAD", &revs, 0, 0);

	info.blobs = 1;
	info.tags = info.commits = info.trees = 0;

	info.revs = &revs;
	info.path_fn = fill_missing_blobs;
	info.path_fn_data = ctx;

	ret = walk_objects_by_path(&info);

	/* Download the objects that did not fill a batch. */
	if (!ret)
		download_batch(ctx);

	clear_backfill_context(ctx);
	release_revisions(&revs);
	if (info.pl) {
		clear_pattern_list(info.pl);
		free(info.pl);
	}
	return ret;
}

int cmd_backfill(int argc, const char **argv, const char *prefix, struct repository *repo)
{
	struct backfill_context ctx = {
		.repo = repo,
		.current_batch = OID_ARRAY_INIT,
		.batch_size = 50000,
		.sparse = 0,
	};
	struct option options[] = {
		OPT_INTEGER(0, "batch-size", &ctx.batch_size,
			    N_("Minimum number of objects to request at a time")),
		OPT_BOOL(0, "sparse", &ctx.sparse,
			 N_("Restrict the missing objects to the current sparse-checkout")),
		OPT_END(),
	};

	if (argc == 2 && !strcmp(argv[1], "-h"))
		usage_with_options(builtin_backfill_usage, options);

	argc = parse_options(argc, argv, prefix, options, builtin_backfill_usage,
			     0);

	repo_config(repo, git_default_config, NULL);

	if (ctx.sparse < 0)
		ctx.sparse = core_apply_sparse_checkout;

	return do_backfill(&ctx);
}
Large diffs are not rendered by default.
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2018-2021 Microsoft Corporation, Daan Leijen

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -0,0 +1,298 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/prim.h"  // mi_prim_get_default_heap

#include <string.h>  // memset

// ------------------------------------------------------
// Aligned Allocation
// ------------------------------------------------------

// Fallback primitive aligned allocation -- split out for better codegen
static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
{
  mi_assert_internal(size <= PTRDIFF_MAX);
  mi_assert_internal(alignment != 0 && _mi_is_power_of_two(alignment));

  const uintptr_t align_mask = alignment - 1;  // for any x, `(x & align_mask) == (x % alignment)`
  const size_t padsize = size + MI_PADDING_SIZE;

  // use regular allocation if it is guaranteed to fit the alignment constraints
  if (offset==0 && alignment<=padsize && padsize<=MI_MAX_ALIGN_GUARANTEE && (padsize&align_mask)==0) {
    void* p = _mi_heap_malloc_zero(heap, size, zero);
    mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0);
    return p;
  }

  void* p;
  size_t oversize;
  if mi_unlikely(alignment > MI_ALIGNMENT_MAX) {
    // use OS allocation for very large alignment and allocate inside a huge page (dedicated segment with 1 page)
    // This can support alignments >= MI_SEGMENT_SIZE by ensuring the object can be aligned at a point in the
    // first (and single) page such that the segment info is `MI_SEGMENT_SIZE` bytes before it (so it can be found by aligning the pointer down)
    if mi_unlikely(offset != 0) {
      // todo: cannot support offset alignment for very large alignments yet
      #if MI_DEBUG > 0
      _mi_error_message(EOVERFLOW, "aligned allocation with a very large alignment cannot be used with an alignment offset (size %zu, alignment %zu, offset %zu)\n", size, alignment, offset);
      #endif
      return NULL;
    }
    oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size);
    p = _mi_heap_malloc_zero_ex(heap, oversize, false, alignment);  // the page block size should be large enough to align in the single huge page block
    // zero afterwards as only the area from the aligned_p may be committed!
    if (p == NULL) return NULL;
  }
  else {
    // otherwise over-allocate
    oversize = size + alignment - 1;
    p = _mi_heap_malloc_zero(heap, oversize, zero);
    if (p == NULL) return NULL;
  }

  // .. and align within the allocation
  const uintptr_t poffset = ((uintptr_t)p + offset) & align_mask;
  const uintptr_t adjust = (poffset == 0 ? 0 : alignment - poffset);
  mi_assert_internal(adjust < alignment);
  void* aligned_p = (void*)((uintptr_t)p + adjust);
  if (aligned_p != p) {
    mi_page_t* page = _mi_ptr_page(p);
    mi_page_set_has_aligned(page, true);
    _mi_padding_shrink(page, (mi_block_t*)p, adjust + size);
  }
  // todo: expand padding if overallocated ?

  mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size);
  mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p));
  mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
  mi_assert_internal(mi_usable_size(aligned_p)>=size);
  mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust);

  // now zero the block if needed
  if (alignment > MI_ALIGNMENT_MAX) {
    // for the tracker, on huge aligned allocations only from the start of the large block is defined
    mi_track_mem_undefined(aligned_p, size);
    if (zero) {
      _mi_memzero_aligned(aligned_p, mi_usable_size(aligned_p));
    }
  }

  if (p != aligned_p) {
    mi_track_align(p,aligned_p,adjust,mi_usable_size(aligned_p));
  }
  return aligned_p;
}

// Primitive aligned allocation
static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
{
  // note: we don't require `size > offset`, we just guarantee that the address at offset is aligned regardless of the allocated size.
  if mi_unlikely(alignment == 0 || !_mi_is_power_of_two(alignment)) {  // require power-of-two (see <https://en.cppreference.com/w/c/memory/aligned_alloc>)
    #if MI_DEBUG > 0
    _mi_error_message(EOVERFLOW, "aligned allocation requires the alignment to be a power-of-two (size %zu, alignment %zu)\n", size, alignment);
    #endif
    return NULL;
  }

  if mi_unlikely(size > PTRDIFF_MAX) {  // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
    #if MI_DEBUG > 0
    _mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment);
    #endif
    return NULL;
  }
  const uintptr_t align_mask = alignment-1;  // for any x, `(x & align_mask) == (x % alignment)`
  const size_t padsize = size + MI_PADDING_SIZE;  // note: cannot overflow due to earlier size > PTRDIFF_MAX check

  // try first if there happens to be a small block available with just the right alignment
  if mi_likely(padsize <= MI_SMALL_SIZE_MAX && alignment <= padsize) {
    mi_page_t* page = _mi_heap_get_free_small_page(heap, padsize);
    const bool is_aligned = (((uintptr_t)page->free+offset) & align_mask)==0;
    if mi_likely(page->free != NULL && is_aligned)
    {
      #if MI_STAT>1
      mi_heap_stat_increase(heap, malloc, size);
      #endif
      void* p = _mi_page_malloc(heap, page, padsize, zero);  // TODO: inline _mi_page_malloc
      mi_assert_internal(p != NULL);
      mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
      mi_track_malloc(p,size,zero);
      return p;
    }
  }
  // fallback
  return mi_heap_malloc_zero_aligned_at_fallback(heap, size, alignment, offset, zero);
}


// ------------------------------------------------------
// Optimized mi_heap_malloc_aligned / mi_malloc_aligned
// ------------------------------------------------------

mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, false);
}

mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
  if mi_unlikely(alignment == 0 || !_mi_is_power_of_two(alignment)) return NULL;
  #if !MI_PADDING
  // without padding, any small sized allocation is naturally aligned (see also `_mi_segment_page_start`)
  if mi_likely(_mi_is_power_of_two(size) && size >= alignment && size <= MI_SMALL_SIZE_MAX)
  #else
  // with padding, we can only guarantee this for fixed alignments
  if mi_likely((alignment == sizeof(void*) || (alignment == MI_MAX_ALIGN_SIZE && size > (MI_MAX_ALIGN_SIZE/2)))
               && size <= MI_SMALL_SIZE_MAX)
  #endif
  {
    // fast path for common alignment and size
    return mi_heap_malloc_small(heap, size);
  }
  else {
    return mi_heap_malloc_aligned_at(heap, size, alignment, 0);
  }
}

// ensure a definition is emitted
#if defined(__cplusplus)
static void* _mi_heap_malloc_aligned = (void*)&mi_heap_malloc_aligned;
#endif

// ------------------------------------------------------
// Aligned Allocation
// ------------------------------------------------------

mi_decl_nodiscard mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, true);
}

mi_decl_nodiscard mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_zalloc_aligned_at(heap, size, alignment, 0);
}

mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(count, size, &total)) return NULL;
  return mi_heap_zalloc_aligned_at(heap, total, alignment, offset);
}

mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_calloc_aligned_at(heap,count,size,alignment,0);
}

mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_malloc_aligned_at(mi_prim_get_default_heap(), size, alignment, offset);
}

mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_malloc_aligned(mi_prim_get_default_heap(), size, alignment);
}

mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_zalloc_aligned_at(mi_prim_get_default_heap(), size, alignment, offset);
}

mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_zalloc_aligned(mi_prim_get_default_heap(), size, alignment);
}

mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_calloc_aligned_at(mi_prim_get_default_heap(), count, size, alignment, offset);
}

mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_calloc_aligned(mi_prim_get_default_heap(), count, size, alignment);
}


// ------------------------------------------------------
// Aligned re-allocation
// ------------------------------------------------------

static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset, bool zero) mi_attr_noexcept {
  mi_assert(alignment > 0);
  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero);
  if (p == NULL) return mi_heap_malloc_zero_aligned_at(heap,newsize,alignment,offset,zero);
  size_t size = mi_usable_size(p);
  if (newsize <= size && newsize >= (size - (size / 2))
      && (((uintptr_t)p + offset) % alignment) == 0) {
    return p;  // reallocation still fits, is aligned and not more than 50% waste
  }
  else {
    // note: we don't zero allocate upfront so we only zero initialize the expanded part
    void* newp = mi_heap_malloc_aligned_at(heap,newsize,alignment,offset);
    if (newp != NULL) {
      if (zero && newsize > size) {
        // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
        size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
        _mi_memzero((uint8_t*)newp + start, newsize - start);
      }
      _mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
      mi_free(p);  // only free if successful
    }
    return newp;
  }
}

static void* mi_heap_realloc_zero_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, bool zero) mi_attr_noexcept {
  mi_assert(alignment > 0);
  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero);
  size_t offset = ((uintptr_t)p % alignment);  // use offset of previous allocation (p can be NULL)
  return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,zero);
}

mi_decl_nodiscard void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,false);
}

mi_decl_nodiscard void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
  return mi_heap_realloc_zero_aligned(heap,p,newsize,alignment,false);
}

mi_decl_nodiscard void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_realloc_zero_aligned_at(heap, p, newsize, alignment, offset, true);
}

mi_decl_nodiscard void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
  return mi_heap_realloc_zero_aligned(heap, p, newsize, alignment, true);
}

mi_decl_nodiscard void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(newcount, size, &total)) return NULL;
  return mi_heap_rezalloc_aligned_at(heap, p, total, alignment, offset);
}

mi_decl_nodiscard void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(newcount, size, &total)) return NULL;
  return mi_heap_rezalloc_aligned(heap, p, total, alignment);
}

mi_decl_nodiscard void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_realloc_aligned_at(mi_prim_get_default_heap(), p, newsize, alignment, offset);
}

mi_decl_nodiscard void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
  return mi_heap_realloc_aligned(mi_prim_get_default_heap(), p, newsize, alignment);
}

mi_decl_nodiscard void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_rezalloc_aligned_at(mi_prim_get_default_heap(), p, newsize, alignment, offset);
}

mi_decl_nodiscard void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
  return mi_heap_rezalloc_aligned(mi_prim_get_default_heap(), p, newsize, alignment);
}

mi_decl_nodiscard void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_recalloc_aligned_at(mi_prim_get_default_heap(), p, newcount, size, alignment, offset);
}

mi_decl_nodiscard void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_recalloc_aligned(mi_prim_get_default_heap(), p, newcount, size, alignment);
}
Large diffs are not rendered by default.
@@ -0,0 +1,115 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2019-2023 Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

/* ----------------------------------------------------------------------------
Concurrent bitmap that can set/reset sequences of bits atomically,
represented as an array of fields where each field is a machine word (`size_t`).
There are two APIs; the standard one cannot have sequences that cross
between the bitmap fields (and a sequence must be <= MI_BITMAP_FIELD_BITS).
(this is used in region allocation)
The `_across` postfixed functions do allow sequences that can cross over
between the fields. (This is used in arena allocation)
---------------------------------------------------------------------------- */
#pragma once
#ifndef MI_BITMAP_H
#define MI_BITMAP_H

/* -----------------------------------------------------------
  Bitmap definition
----------------------------------------------------------- */

#define MI_BITMAP_FIELD_BITS   (8*MI_SIZE_SIZE)
#define MI_BITMAP_FIELD_FULL   (~((size_t)0))   // all bits set

// An atomic bitmap of `size_t` fields
typedef _Atomic(size_t) mi_bitmap_field_t;
typedef mi_bitmap_field_t* mi_bitmap_t;

// A bitmap index is the index of the bit in a bitmap.
typedef size_t mi_bitmap_index_t;

// Create a bit index.
static inline mi_bitmap_index_t mi_bitmap_index_create(size_t idx, size_t bitidx) {
  mi_assert_internal(bitidx < MI_BITMAP_FIELD_BITS);
  return (idx*MI_BITMAP_FIELD_BITS) + bitidx;
}

// Create a bit index.
static inline mi_bitmap_index_t mi_bitmap_index_create_from_bit(size_t full_bitidx) {
  return mi_bitmap_index_create(full_bitidx / MI_BITMAP_FIELD_BITS, full_bitidx % MI_BITMAP_FIELD_BITS);
}

// Get the field index from a bit index.
static inline size_t mi_bitmap_index_field(mi_bitmap_index_t bitmap_idx) {
  return (bitmap_idx / MI_BITMAP_FIELD_BITS);
}

// Get the bit index in a bitmap field
static inline size_t mi_bitmap_index_bit_in_field(mi_bitmap_index_t bitmap_idx) {
  return (bitmap_idx % MI_BITMAP_FIELD_BITS);
}

// Get the full bit index
static inline size_t mi_bitmap_index_bit(mi_bitmap_index_t bitmap_idx) {
  return bitmap_idx;
}

/* -----------------------------------------------------------
  Claim a bit sequence atomically
----------------------------------------------------------- */

// Try to atomically claim a sequence of `count` bits in a single
// field at `idx` in `bitmap`. Returns `true` on success.
bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx);

// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
// For now, `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields.
bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx);

// Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fulfilled
typedef bool (mi_cdecl *mi_bitmap_pred_fun_t)(mi_bitmap_index_t bitmap_idx, void* pred_arg);
bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_pred_fun_t pred_fun, void* pred_arg, mi_bitmap_index_t* bitmap_idx);

// Set `count` bits at `bitmap_idx` to 0 atomically
// Returns `true` if all `count` bits were 1 previously.
bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);

// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically.
// Returns `true` if successful when all previous `count` bits were 0.
bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);

// Set `count` bits at `bitmap_idx` to 1 atomically
// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
bool _mi_bitmap_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_zero);

bool _mi_bitmap_is_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);


//--------------------------------------------------------------------------
// the `_across` functions work on bitmaps where sequences can cross over
// between the fields. This is used in arena allocation
//--------------------------------------------------------------------------

// Find `count` bits of zeros and set them to 1 atomically; returns `true` on success.
// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx);

// Set `count` bits at `bitmap_idx` to 0 atomically
// Returns `true` if all `count` bits were 1 previously.
bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);

// Set `count` bits at `bitmap_idx` to 1 atomically
// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero);

bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);

#endif
Large diffs are not rendered by default.
@@ -0,0 +1,323 @@
/* ---------------------------------------------------------------------------- | ||
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen | ||
This is free software; you can redistribute it and/or modify it under the | ||
terms of the MIT license. A copy of the license can be found in the file | ||
"LICENSE" at the root of this distribution. | ||
-----------------------------------------------------------------------------*/ | ||
#pragma once | ||
#ifndef MIMALLOC_PRIM_H | ||
#define MIMALLOC_PRIM_H | ||
|
||
|
||
// -------------------------------------------------------------------------- | ||
// This file specifies the primitive portability API. | ||
// Each OS/host needs to implement these primitives, see `src/prim` | ||
// for implementations on Window, macOS, WASI, and Linux/Unix. | ||
// | ||
// note: on all primitive functions, we always have result parameters != NUL, and: | ||
// addr != NULL and page aligned | ||
// size > 0 and page aligned | ||
// return value is an error code an int where 0 is success. | ||
// -------------------------------------------------------------------------- | ||
|
||
// OS memory configuration | ||
typedef struct mi_os_mem_config_s { | ||
size_t page_size; // 4KiB | ||
size_t large_page_size; // 2MiB | ||
size_t alloc_granularity; // smallest allocation size (on Windows 64KiB) | ||
bool has_overcommit; // can we reserve more memory than can actually be committed? | ||
bool must_free_whole; // must allocated blocks be freed as a whole (false for mmap, true for VirtualAlloc) | ||
bool has_virtual_reserve; // supports virtual address space reservation? (if true we can reserve virtual address space without using commit or physical memory) | ||
} mi_os_mem_config_t; | ||
|
||
// Initialize | ||
void _mi_prim_mem_init( mi_os_mem_config_t* config ); | ||
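|
||
// For illustration only: a Unix implementation might fill the configuration | ||
// roughly as follows (a sketch under assumed Linux defaults, not the actual | ||
// code in `src/prim`): | ||
// | ||
//   void _mi_prim_mem_init(mi_os_mem_config_t* config) { | ||
//     long psize = sysconf(_SC_PAGESIZE);        // <unistd.h> | ||
//     config->page_size = (psize > 0 ? (size_t)psize : 4096); | ||
//     config->large_page_size = 2*1024*1024;     // 2MiB huge pages | ||
//     config->alloc_granularity = config->page_size; | ||
//     config->has_overcommit = true;             // Linux overcommits by default | ||
//     config->must_free_whole = false;           // munmap can free partial ranges | ||
//     config->has_virtual_reserve = true;        // mmap(PROT_NONE) reserves | ||
//   } | ||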
|
||
// Free OS memory | ||
int _mi_prim_free(void* addr, size_t size ); | ||
|
||
// Allocate OS memory. Return NULL on error. | ||
// The `try_alignment` is just a hint and the returned pointer does not have to be aligned. | ||
// If `commit` is false, the virtual memory range only needs to be reserved (with no access) | ||
// which will later be committed explicitly using `_mi_prim_commit`. | ||
// `is_zero` is set to true if the memory was zero initialized (as on most OS's) | ||
// pre: !commit => !allow_large | ||
// try_alignment >= _mi_os_page_size() and a power of 2 | ||
int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr); | ||
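|
||
// For illustration only: a simplified POSIX sketch that ignores | ||
// `try_alignment` and large pages (needs <sys/mman.h> and <errno.h>; the | ||
// real implementations in `src/prim` do considerably more): | ||
// | ||
//   int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, | ||
//                      bool allow_large, bool* is_large, bool* is_zero, void** addr) { | ||
//     int prot = (commit ? PROT_READ|PROT_WRITE : PROT_NONE); | ||
//     void* p  = mmap(NULL, size, prot, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); | ||
//     if (p == MAP_FAILED) { *addr = NULL; return errno; } | ||
//     *is_large = false; *is_zero = true;  // anonymous mmap is zero-initialized | ||
//     *addr = p; return 0; | ||
//   } | ||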
|
||
// Commit memory. Returns error code or 0 on success. | ||
// For example, on Linux this would make the memory PROT_READ|PROT_WRITE. | ||
// `is_zero` is set to true if the memory was zero initialized (e.g. on Windows) | ||
int _mi_prim_commit(void* addr, size_t size, bool* is_zero); | ||
|
||
// Decommit memory. Returns error code or 0 on success. The `needs_recommit` result is true | ||
// if the memory would need to be re-committed. For example, on Windows this is always true, | ||
// but on Linux we could use MADV_DONTNEED to decommit, which does not need a recommit. | ||
// pre: needs_recommit != NULL | ||
int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit); | ||
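|
||
// For illustration only, a Linux sketch of the MADV_DONTNEED approach | ||
// mentioned above (not the actual implementation): | ||
// | ||
//   int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit) { | ||
//     *needs_recommit = false;  // the range stays accessible on Linux | ||
//     return (madvise(addr, size, MADV_DONTNEED) == 0 ? 0 : errno); | ||
//   } | ||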
|
||
// Reset memory. The range keeps being accessible but the content might be reset. | ||
// Returns error code or 0 on success. | ||
int _mi_prim_reset(void* addr, size_t size); | ||
|
||
// Protect memory. Returns error code or 0 on success. | ||
int _mi_prim_protect(void* addr, size_t size, bool protect); | ||
|
||
// Allocate huge (1GiB) pages possibly associated with a NUMA node. | ||
// `is_zero` is set to true if the memory was zero initialized (as on most OS's) | ||
// pre: size > 0 and a multiple of 1GiB. | ||
// numa_node is either negative (don't care), or a numa node number. | ||
int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr); | ||
|
||
// Return the current NUMA node | ||
size_t _mi_prim_numa_node(void); | ||
|
||
// Return the number of logical NUMA nodes | ||
size_t _mi_prim_numa_node_count(void); | ||
|
||
// Clock ticks | ||
mi_msecs_t _mi_prim_clock_now(void); | ||
|
||
// Return process information (only for statistics) | ||
typedef struct mi_process_info_s { | ||
mi_msecs_t elapsed; | ||
mi_msecs_t utime; | ||
mi_msecs_t stime; | ||
size_t current_rss; | ||
size_t peak_rss; | ||
size_t current_commit; | ||
size_t peak_commit; | ||
size_t page_faults; | ||
} mi_process_info_t; | ||
|
||
void _mi_prim_process_info(mi_process_info_t* pinfo); | ||
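|
||
// For illustration only: on POSIX systems part of this information could be | ||
// gathered via getrusage (a sketch; the millisecond conversions and the | ||
// KiB unit of `ru_maxrss` are Linux assumptions): | ||
// | ||
//   struct rusage ru;                      // <sys/resource.h> | ||
//   getrusage(RUSAGE_SELF, &ru); | ||
//   pinfo->utime    = ru.ru_utime.tv_sec*1000 + ru.ru_utime.tv_usec/1000; | ||
//   pinfo->stime    = ru.ru_stime.tv_sec*1000 + ru.ru_stime.tv_usec/1000; | ||
//   pinfo->peak_rss = (size_t)ru.ru_maxrss * 1024; | ||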
|
||
// Default stderr output. (only for warnings etc. with verbose enabled) | ||
// msg != NULL && _mi_strlen(msg) > 0 | ||
void _mi_prim_out_stderr( const char* msg ); | ||
|
||
// Get an environment variable. (only for options) | ||
// name != NULL, result != NULL, result_size >= 64 | ||
bool _mi_prim_getenv(const char* name, char* result, size_t result_size); | ||
|
||
|
||
// Fill a buffer with strong randomness; return `false` on error or if | ||
// there is no strong randomization available. | ||
bool _mi_prim_random_buf(void* buf, size_t buf_len); | ||
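|
||
// For illustration only, a Linux sketch using getrandom (an assumption, not | ||
// necessarily how `src/prim` implements it): | ||
// | ||
//   #include <sys/random.h> | ||
//   bool _mi_prim_random_buf(void* buf, size_t buf_len) { | ||
//     return (getrandom(buf, buf_len, 0) == (ssize_t)buf_len); | ||
//   } | ||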
|
||
// Called on the first thread start, and should ensure `_mi_thread_done` is called on thread termination. | ||
void _mi_prim_thread_init_auto_done(void); | ||
|
||
// Called on process exit and may take action to clean up resources associated with the thread auto done. | ||
void _mi_prim_thread_done_auto_done(void); | ||
|
||
// Called when the default heap for a thread changes | ||
void _mi_prim_thread_associate_default_heap(mi_heap_t* heap); | ||
|
||
|
||
//------------------------------------------------------------------- | ||
// Thread id: `_mi_prim_thread_id()` | ||
// | ||
// Getting the thread id should be performant as it is called in the | ||
// fast path of `_mi_free` and we specialize for various platforms as | ||
// inlined definitions. Regular code should call `init.c:_mi_thread_id()`. | ||
// We only require _mi_prim_thread_id() to return a unique id | ||
// for each thread (unequal to zero). | ||
//------------------------------------------------------------------- | ||
|
||
// defined in `init.c`; do not use these directly | ||
extern mi_decl_thread mi_heap_t* _mi_heap_default; // default heap to allocate from | ||
extern bool _mi_process_is_initialized; // has mi_process_init been called? | ||
|
||
static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept; | ||
|
||
#if defined(_WIN32) | ||
|
||
#define WIN32_LEAN_AND_MEAN | ||
#include <windows.h> | ||
static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { | ||
// Windows: works on Intel and ARM in both 32- and 64-bit | ||
return (uintptr_t)NtCurrentTeb(); | ||
} | ||
|
||
// We use assembly for a fast thread id on the main platforms. The TLS layout depends on | ||
// both the OS and libc implementation so we use specific tests for each main platform. | ||
// If you test on another platform and it works please send a PR :-) | ||
// see also https://akkadia.org/drepper/tls.pdf for more info on the TLS register. | ||
#elif defined(__GNUC__) && ( \ | ||
(defined(__GLIBC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \ | ||
|| (defined(__APPLE__) && (defined(__x86_64__) || defined(__aarch64__))) \ | ||
|| (defined(__BIONIC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \ | ||
|| (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \ | ||
|| (defined(__OpenBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \ | ||
) | ||
|
||
static inline void* mi_prim_tls_slot(size_t slot) mi_attr_noexcept { | ||
void* res; | ||
const size_t ofs = (slot*sizeof(void*)); | ||
#if defined(__i386__) | ||
__asm__("movl %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86 32-bit always uses GS | ||
#elif defined(__APPLE__) && defined(__x86_64__) | ||
__asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 macOSX uses GS | ||
#elif defined(__x86_64__) && (MI_INTPTR_SIZE==4) | ||
__asm__("movl %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x32 ABI | ||
#elif defined(__x86_64__) | ||
__asm__("movq %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 Linux, BSD uses FS | ||
#elif defined(__arm__) | ||
void** tcb; MI_UNUSED(ofs); | ||
__asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb)); | ||
res = tcb[slot]; | ||
#elif defined(__aarch64__) | ||
void** tcb; MI_UNUSED(ofs); | ||
#if defined(__APPLE__) // M1, issue #343 | ||
__asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb)); | ||
#else | ||
__asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb)); | ||
#endif | ||
res = tcb[slot]; | ||
#endif | ||
return res; | ||
} | ||
|
||
// setting a tls slot is only used on macOS for now | ||
static inline void mi_prim_tls_slot_set(size_t slot, void* value) mi_attr_noexcept { | ||
const size_t ofs = (slot*sizeof(void*)); | ||
#if defined(__i386__) | ||
__asm__("movl %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // 32-bit always uses GS | ||
#elif defined(__APPLE__) && defined(__x86_64__) | ||
__asm__("movq %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 macOS uses GS | ||
#elif defined(__x86_64__) && (MI_INTPTR_SIZE==4) | ||
__asm__("movl %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x32 ABI | ||
#elif defined(__x86_64__) | ||
__asm__("movq %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 Linux, BSD uses FS | ||
#elif defined(__arm__) | ||
void** tcb; MI_UNUSED(ofs); | ||
__asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb)); | ||
tcb[slot] = value; | ||
#elif defined(__aarch64__) | ||
void** tcb; MI_UNUSED(ofs); | ||
#if defined(__APPLE__) // M1, issue #343 | ||
__asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb)); | ||
#else | ||
__asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb)); | ||
#endif | ||
tcb[slot] = value; | ||
#endif | ||
} | ||
|
||
static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { | ||
#if defined(__BIONIC__) | ||
// issue #384, #495: on the Bionic libc (Android), slot 1 is the thread id | ||
// see: https://github.com/aosp-mirror/platform_bionic/blob/c44b1d0676ded732df4b3b21c5f798eacae93228/libc/platform/bionic/tls_defines.h#L86 | ||
return (uintptr_t)mi_prim_tls_slot(1); | ||
#else | ||
// in all our other targets, slot 0 is the thread id | ||
// glibc: https://sourceware.org/git/?p=glibc.git;a=blob_plain;f=sysdeps/x86_64/nptl/tls.h | ||
// apple: https://github.com/apple/darwin-xnu/blob/main/libsyscall/os/tsd.h#L36 | ||
return (uintptr_t)mi_prim_tls_slot(0); | ||
#endif | ||
} | ||
|
||
#else | ||
|
||
// otherwise use portable C, taking the address of a thread local variable (this is still very fast on most platforms). | ||
static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { | ||
return (uintptr_t)&_mi_heap_default; | ||
} | ||
|
||
#endif | ||
|
||
|
||
|
||
/* ---------------------------------------------------------------------------------------- | ||
The thread local default heap: `_mi_prim_get_default_heap()` | ||
This is inlined here as it is on the fast path for allocation functions. | ||
On most platforms (Windows, Linux, FreeBSD, NetBSD, etc), this just returns a | ||
__thread local variable (`_mi_heap_default`). With the initial-exec TLS model this ensures | ||
that the storage will always be available (allocated on the thread stacks). | ||
On some platforms though we cannot use that when overriding `malloc` since the underlying | ||
TLS implementation (or the loader) will itself call `malloc` on first access and recurse. | ||
We try to circumvent this in an efficient way: | ||
- macOS  : we use an unused TLS slot from the OS allocated slots (MI_TLS_SLOT). On macOS, the | ||
loader itself calls `malloc` even before the modules are initialized. | ||
- OpenBSD: we use an unused slot from the pthread block (MI_TLS_PTHREAD_SLOT_OFS). | ||
- DragonFly: defaults are working but seem slow compared to FreeBSD (see PR #323) | ||
------------------------------------------------------------------------------------------- */ | ||
|
||
static inline mi_heap_t* mi_prim_get_default_heap(void); | ||
|
||
#if defined(MI_MALLOC_OVERRIDE) | ||
#if defined(__APPLE__) // macOS | ||
#define MI_TLS_SLOT 89 // seems unused? | ||
// #define MI_TLS_RECURSE_GUARD 1 | ||
// other possible unused ones are 9, 29, __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY4 (94), __PTK_FRAMEWORK_GC_KEY9 (112) and __PTK_FRAMEWORK_OLDGC_KEY9 (89) | ||
// see <https://github.com/rweichler/substrate/blob/master/include/pthread_machdep.h> | ||
#elif defined(__OpenBSD__) | ||
// use end bytes of a name; goes wrong if anyone uses names > 23 characters (pthread specifies 16) | ||
// see <https://github.com/openbsd/src/blob/master/lib/libc/include/thread_private.h#L371> | ||
#define MI_TLS_PTHREAD_SLOT_OFS (6*sizeof(int) + 4*sizeof(void*) + 24) | ||
// #elif defined(__DragonFly__) | ||
// #warning "mimalloc is not working correctly on DragonFly yet." | ||
// #define MI_TLS_PTHREAD_SLOT_OFS (4 + 1*sizeof(void*)) // offset `uniqueid` (also used by gdb?) <https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/lib/libthread_xu/thread/thr_private.h#L458> | ||
#elif defined(__ANDROID__) | ||
// See issue #381 | ||
#define MI_TLS_PTHREAD | ||
#endif | ||
#endif | ||
|
||
|
||
#if defined(MI_TLS_SLOT) | ||
|
||
static inline mi_heap_t* mi_prim_get_default_heap(void) { | ||
mi_heap_t* heap = (mi_heap_t*)mi_prim_tls_slot(MI_TLS_SLOT); | ||
if mi_unlikely(heap == NULL) { | ||
#ifdef __GNUC__ | ||
__asm(""); // prevent conditional load of the address of _mi_heap_empty | ||
#endif | ||
heap = (mi_heap_t*)&_mi_heap_empty; | ||
} | ||
return heap; | ||
} | ||
|
||
#elif defined(MI_TLS_PTHREAD_SLOT_OFS) | ||
|
||
static inline mi_heap_t** mi_prim_tls_pthread_heap_slot(void) { | ||
pthread_t self = pthread_self(); | ||
#if defined(__DragonFly__) | ||
if (self==NULL) return NULL; | ||
#endif | ||
return (mi_heap_t**)((uint8_t*)self + MI_TLS_PTHREAD_SLOT_OFS); | ||
} | ||
|
||
static inline mi_heap_t* mi_prim_get_default_heap(void) { | ||
mi_heap_t** pheap = mi_prim_tls_pthread_heap_slot(); | ||
if mi_unlikely(pheap == NULL) return _mi_heap_main_get(); | ||
mi_heap_t* heap = *pheap; | ||
if mi_unlikely(heap == NULL) return (mi_heap_t*)&_mi_heap_empty; | ||
return heap; | ||
} | ||
|
||
#elif defined(MI_TLS_PTHREAD) | ||
|
||
extern pthread_key_t _mi_heap_default_key; | ||
static inline mi_heap_t* mi_prim_get_default_heap(void) { | ||
mi_heap_t* heap = (mi_unlikely(_mi_heap_default_key == (pthread_key_t)(-1)) ? _mi_heap_main_get() : (mi_heap_t*)pthread_getspecific(_mi_heap_default_key)); | ||
return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap); | ||
} | ||
|
||
#else // default using a thread local variable; used on most platforms. | ||
|
||
static inline mi_heap_t* mi_prim_get_default_heap(void) { | ||
#if defined(MI_TLS_RECURSE_GUARD) | ||
if (mi_unlikely(!_mi_process_is_initialized)) return _mi_heap_main_get(); | ||
#endif | ||
return _mi_heap_default; | ||
} | ||
|
||
#endif // mi_prim_get_default_heap() | ||
|
||
|
||
|
||
#endif // MIMALLOC_PRIM_H |
@@ -0,0 +1,147 @@ | ||
/* ---------------------------------------------------------------------------- | ||
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen | ||
This is free software; you can redistribute it and/or modify it under the | ||
terms of the MIT license. A copy of the license can be found in the file | ||
"LICENSE" at the root of this distribution. | ||
-----------------------------------------------------------------------------*/ | ||
#pragma once | ||
#ifndef MIMALLOC_TRACK_H | ||
#define MIMALLOC_TRACK_H | ||
|
||
/* ------------------------------------------------------------------------------------------------------ | ||
Track memory ranges with macros for tools like Valgrind, address sanitizer, or other memory checkers. | ||
These can be defined for tracking allocation: | ||
#define mi_track_malloc_size(p,reqsize,size,zero) | ||
#define mi_track_free_size(p,_size) | ||
The macros are set up such that the size passed to `mi_track_free_size` | ||
always matches the size passed to `mi_track_malloc_size` (currently, `size == mi_usable_size(p)`). | ||
The `reqsize` is what the user requested, and `size >= reqsize`. | ||
The `size` is either byte precise (and `size==reqsize`) if `MI_PADDING` is enabled, | ||
or otherwise it is the usable block size which may be larger than the original request. | ||
Use `_mi_block_size_of(void* p)` to get the full block size that was allocated (including padding etc). | ||
The `zero` parameter is `true` if the allocated block is zero initialized. | ||
Optional: | ||
#define mi_track_align(p,alignedp,offset,size) | ||
#define mi_track_resize(p,oldsize,newsize) | ||
#define mi_track_init() | ||
The `mi_track_align` is called right after a `mi_track_malloc` for aligned pointers in a block. | ||
The corresponding `mi_track_free` still uses the block start pointer and original size (corresponding to the `mi_track_malloc`). | ||
The `mi_track_resize` is currently unused but could be called on reallocations within a block. | ||
`mi_track_init` is called at program start. | ||
The following macros are for tools like asan and valgrind to track whether memory is | ||
defined, undefined, or not accessible at all: | ||
#define mi_track_mem_defined(p,size) | ||
#define mi_track_mem_undefined(p,size) | ||
#define mi_track_mem_noaccess(p,size) | ||
-------------------------------------------------------------------------------------------------------*/ | ||
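|
||
/* For example, a hypothetical logging backend could be added as another | ||
   branch of the dispatch below (illustrative only; no MI_TRACK_LOG backend | ||
   exists in this file): | ||
 | ||
     #elif MI_TRACK_LOG | ||
     #define MI_TRACK_ENABLED 1 | ||
     #define MI_TRACK_HEAP_DESTROY 0 | ||
     #define MI_TRACK_TOOL "log" | ||
     #include <stdio.h> | ||
     #define mi_track_malloc_size(p,reqsize,size,zero) \ | ||
       fprintf(stderr, "mi alloc %p (%zu bytes)\n", (void*)(p), (size_t)(size)) | ||
     #define mi_track_free_size(p,size) \ | ||
       fprintf(stderr, "mi free  %p (%zu bytes)\n", (void*)(p), (size_t)(size)) | ||
*/ | ||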
|
||
#if MI_TRACK_VALGRIND | ||
// valgrind tool | ||
|
||
#define MI_TRACK_ENABLED 1 | ||
#define MI_TRACK_HEAP_DESTROY 1 // track free of individual blocks on heap_destroy | ||
#define MI_TRACK_TOOL "valgrind" | ||
|
||
#include <valgrind/valgrind.h> | ||
#include <valgrind/memcheck.h> | ||
|
||
#define mi_track_malloc_size(p,reqsize,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*red zone*/,zero) | ||
#define mi_track_free_size(p,_size) VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*red zone*/) | ||
#define mi_track_resize(p,oldsize,newsize) VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,MI_PADDING_SIZE /*red zone*/) | ||
#define mi_track_mem_defined(p,size) VALGRIND_MAKE_MEM_DEFINED(p,size) | ||
#define mi_track_mem_undefined(p,size) VALGRIND_MAKE_MEM_UNDEFINED(p,size) | ||
#define mi_track_mem_noaccess(p,size) VALGRIND_MAKE_MEM_NOACCESS(p,size) | ||
|
||
#elif MI_TRACK_ASAN | ||
// address sanitizer | ||
|
||
#define MI_TRACK_ENABLED 1 | ||
#define MI_TRACK_HEAP_DESTROY 0 | ||
#define MI_TRACK_TOOL "asan" | ||
|
||
#include <sanitizer/asan_interface.h> | ||
|
||
#define mi_track_malloc_size(p,reqsize,size,zero) ASAN_UNPOISON_MEMORY_REGION(p,size) | ||
#define mi_track_free_size(p,size) ASAN_POISON_MEMORY_REGION(p,size) | ||
#define mi_track_mem_defined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size) | ||
#define mi_track_mem_undefined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size) | ||
#define mi_track_mem_noaccess(p,size) ASAN_POISON_MEMORY_REGION(p,size) | ||
|
||
#elif MI_TRACK_ETW | ||
// windows event tracing | ||
|
||
#define MI_TRACK_ENABLED 1 | ||
#define MI_TRACK_HEAP_DESTROY 1 | ||
#define MI_TRACK_TOOL "ETW" | ||
|
||
#define WIN32_LEAN_AND_MEAN | ||
#include <windows.h> | ||
#include "../src/prim/windows/etw.h" | ||
|
||
#define mi_track_init() EventRegistermicrosoft_windows_mimalloc(); | ||
#define mi_track_malloc_size(p,reqsize,size,zero) EventWriteETW_MI_ALLOC((UINT64)(p), size) | ||
#define mi_track_free_size(p,size) EventWriteETW_MI_FREE((UINT64)(p), size) | ||
|
||
#else | ||
// no tracking | ||
|
||
#define MI_TRACK_ENABLED 0 | ||
#define MI_TRACK_HEAP_DESTROY 0 | ||
#define MI_TRACK_TOOL "none" | ||
|
||
#define mi_track_malloc_size(p,reqsize,size,zero) | ||
#define mi_track_free_size(p,_size) | ||
|
||
#endif | ||
|
||
// ------------------- | ||
// Utility definitions | ||
|
||
#ifndef mi_track_resize | ||
#define mi_track_resize(p,oldsize,newsize) mi_track_free_size(p,oldsize); mi_track_malloc(p,newsize,false) | ||
#endif | ||
|
||
#ifndef mi_track_align | ||
#define mi_track_align(p,alignedp,offset,size) mi_track_mem_noaccess(p,offset) | ||
#endif | ||
|
||
#ifndef mi_track_init | ||
#define mi_track_init() | ||
#endif | ||
|
||
#ifndef mi_track_mem_defined | ||
#define mi_track_mem_defined(p,size) | ||
#endif | ||
|
||
#ifndef mi_track_mem_undefined | ||
#define mi_track_mem_undefined(p,size) | ||
#endif | ||
|
||
#ifndef mi_track_mem_noaccess | ||
#define mi_track_mem_noaccess(p,size) | ||
#endif | ||
|
||
|
||
#if MI_PADDING | ||
#define mi_track_malloc(p,reqsize,zero) \ | ||
if ((p)!=NULL) { \ | ||
mi_assert_internal(mi_usable_size(p)==(reqsize)); \ | ||
mi_track_malloc_size(p,reqsize,reqsize,zero); \ | ||
} | ||
#else | ||
#define mi_track_malloc(p,reqsize,zero) \ | ||
if ((p)!=NULL) { \ | ||
mi_assert_internal(mi_usable_size(p)>=(reqsize)); \ | ||
mi_track_malloc_size(p,reqsize,mi_usable_size(p),zero); \ | ||
} | ||
#endif | ||
|
||
#endif |
Large diffs are not rendered by default.
@@ -0,0 +1,332 @@ | ||
/*---------------------------------------------------------------------------- | ||
Copyright (c) 2018-2020, Microsoft Research, Daan Leijen | ||
This is free software; you can redistribute it and/or modify it under the | ||
terms of the MIT license. A copy of the license can be found in the file | ||
"LICENSE" at the root of this distribution. | ||
-----------------------------------------------------------------------------*/ | ||
|
||
/* ----------------------------------------------------------- | ||
Definition of page queues for each block size | ||
----------------------------------------------------------- */ | ||
|
||
#ifndef MI_IN_PAGE_C | ||
#error "this file should be included from 'page.c'" | ||
#endif | ||
|
||
/* ----------------------------------------------------------- | ||
Minimal alignment in machine words (i.e. `sizeof(void*)`) | ||
----------------------------------------------------------- */ | ||
|
||
#if (MI_MAX_ALIGN_SIZE > 4*MI_INTPTR_SIZE) | ||
#error "define alignment for more than 4x word size for this platform" | ||
#elif (MI_MAX_ALIGN_SIZE > 2*MI_INTPTR_SIZE) | ||
#define MI_ALIGN4W // 4 machine words minimal alignment | ||
#elif (MI_MAX_ALIGN_SIZE > MI_INTPTR_SIZE) | ||
#define MI_ALIGN2W // 2 machine words minimal alignment | ||
#else | ||
// ok, default alignment is 1 word | ||
#endif | ||
|
||
|
||
/* ----------------------------------------------------------- | ||
Queue query | ||
----------------------------------------------------------- */ | ||
|
||
|
||
static inline bool mi_page_queue_is_huge(const mi_page_queue_t* pq) { | ||
return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+sizeof(uintptr_t))); | ||
} | ||
|
||
static inline bool mi_page_queue_is_full(const mi_page_queue_t* pq) { | ||
return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+(2*sizeof(uintptr_t)))); | ||
} | ||
|
||
static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) { | ||
return (pq->block_size > MI_MEDIUM_OBJ_SIZE_MAX); | ||
} | ||
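|
||
// Note: the huge and full queues are distinguished by sentinel block sizes | ||
// (one and two machine words above MI_MEDIUM_OBJ_SIZE_MAX respectively) | ||
// rather than by real block sizes. | ||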
|
||
/* ----------------------------------------------------------- | ||
Bins | ||
----------------------------------------------------------- */ | ||
|
||
// Return the bin for a given allocation size. | ||
// Returns MI_BIN_HUGE if the size is too large. | ||
// We use `wsize` for the size in "machine word sizes", | ||
// i.e. byte size == `wsize*sizeof(void*)`. | ||
static inline uint8_t mi_bin(size_t size) { | ||
size_t wsize = _mi_wsize_from_size(size); | ||
uint8_t bin; | ||
if (wsize <= 1) { | ||
bin = 1; | ||
} | ||
#if defined(MI_ALIGN4W) | ||
else if (wsize <= 4) { | ||
bin = (uint8_t)((wsize+1)&~1); // round to double word sizes | ||
} | ||
#elif defined(MI_ALIGN2W) | ||
else if (wsize <= 8) { | ||
bin = (uint8_t)((wsize+1)&~1); // round to double word sizes | ||
} | ||
#else | ||
else if (wsize <= 8) { | ||
bin = (uint8_t)wsize; | ||
} | ||
#endif | ||
else if (wsize > MI_MEDIUM_OBJ_WSIZE_MAX) { | ||
bin = MI_BIN_HUGE; | ||
} | ||
else { | ||
#if defined(MI_ALIGN4W) | ||
if (wsize <= 16) { wsize = (wsize+3)&~3; } // round to 4x word sizes | ||
#endif | ||
wsize--; | ||
// find the highest bit | ||
uint8_t b = (uint8_t)mi_bsr(wsize); // note: wsize != 0 | ||
// and use the top 3 bits to determine the bin (~12.5% worst-case internal fragmentation). | ||
// - adjust with 3 because we do not round the first 8 sizes | ||
// which each get an exact bin | ||
bin = ((b << 2) + (uint8_t)((wsize >> (b - 2)) & 0x03)) - 3; | ||
mi_assert_internal(bin < MI_BIN_HUGE); | ||
} | ||
mi_assert_internal(bin > 0 && bin <= MI_BIN_HUGE); | ||
return bin; | ||
} | ||
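|
||
// Worked example (illustrative; 64-bit words, MI_ALIGN4W not defined): | ||
// for size = 100 bytes, wsize = 13; after `wsize--` we have 12 = 0b1100, | ||
// so b = mi_bsr(12) = 3 and | ||
//   bin = ((3 << 2) + ((12 >> 1) & 0x03)) - 3 = (12 + 2) - 3 = 11. | ||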
|
||
|
||
|
||
/* ----------------------------------------------------------- | ||
Queue of pages with free blocks | ||
----------------------------------------------------------- */ | ||
|
||
uint8_t _mi_bin(size_t size) { | ||
return mi_bin(size); | ||
} | ||
|
||
size_t _mi_bin_size(uint8_t bin) { | ||
return _mi_heap_empty.pages[bin].block_size; | ||
} | ||
|
||
// Good size for allocation | ||
size_t mi_good_size(size_t size) mi_attr_noexcept { | ||
if (size <= MI_MEDIUM_OBJ_SIZE_MAX) { | ||
return _mi_bin_size(mi_bin(size)); | ||
} | ||
else { | ||
return _mi_align_up(size,_mi_os_page_size()); | ||
} | ||
} | ||
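|
||
// For example (continuing the illustrative 64-bit case above): a request of | ||
// 100 bytes falls in bin 11 and is rounded up to that bin's block size, while | ||
// any request above MI_MEDIUM_OBJ_SIZE_MAX is rounded up to the OS page size. | ||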
|
||
#if (MI_DEBUG>1) | ||
static bool mi_page_queue_contains(mi_page_queue_t* queue, const mi_page_t* page) { | ||
mi_assert_internal(page != NULL); | ||
mi_page_t* list = queue->first; | ||
while (list != NULL) { | ||
mi_assert_internal(list->next == NULL || list->next->prev == list); | ||
mi_assert_internal(list->prev == NULL || list->prev->next == list); | ||
if (list == page) break; | ||
list = list->next; | ||
} | ||
return (list == page); | ||
} | ||
|
||
#endif | ||
|
||
#if (MI_DEBUG>1) | ||
static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t* pq) { | ||
return (pq >= &heap->pages[0] && pq <= &heap->pages[MI_BIN_FULL]); | ||
} | ||
#endif | ||
|
||
static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) { | ||
uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(page->xblock_size)); | ||
mi_heap_t* heap = mi_page_heap(page); | ||
mi_assert_internal(heap != NULL && bin <= MI_BIN_FULL); | ||
mi_page_queue_t* pq = &heap->pages[bin]; | ||
mi_assert_internal(bin >= MI_BIN_HUGE || page->xblock_size == pq->block_size); | ||
mi_assert_expensive(mi_page_queue_contains(pq, page)); | ||
return pq; | ||
} | ||
|
||
static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) { | ||
uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(page->xblock_size)); | ||
mi_assert_internal(bin <= MI_BIN_FULL); | ||
mi_page_queue_t* pq = &heap->pages[bin]; | ||
mi_assert_internal(mi_page_is_in_full(page) || page->xblock_size == pq->block_size); | ||
return pq; | ||
} | ||
|
||
// The small page array (`pages_free_direct`) is kept for efficiency: for each | ||
// small size (up to 256) it points directly to the page for that | ||
// size without having to compute the bin. This means that when the | ||
// first page of a free page queue changes for a small bin, we need to update a | ||
// range of entries in `pages_free_direct`. | ||
static inline void mi_heap_queue_first_update(mi_heap_t* heap, const mi_page_queue_t* pq) { | ||
mi_assert_internal(mi_heap_contains_queue(heap,pq)); | ||
size_t size = pq->block_size; | ||
if (size > MI_SMALL_SIZE_MAX) return; | ||
|
||
mi_page_t* page = pq->first; | ||
if (pq->first == NULL) page = (mi_page_t*)&_mi_page_empty; | ||
|
||
// find index in the right direct page array | ||
size_t start; | ||
size_t idx = _mi_wsize_from_size(size); | ||
mi_page_t** pages_free = heap->pages_free_direct; | ||
|
||
if (pages_free[idx] == page) return; // already set | ||
|
||
// find start slot | ||
if (idx<=1) { | ||
start = 0; | ||
} | ||
else { | ||
// find previous size; due to minimal alignment, up to 3 previous bins may need to be skipped | ||
uint8_t bin = mi_bin(size); | ||
const mi_page_queue_t* prev = pq - 1; | ||
while( bin == mi_bin(prev->block_size) && prev > &heap->pages[0]) { | ||
prev--; | ||
} | ||
start = 1 + _mi_wsize_from_size(prev->block_size); | ||
if (start > idx) start = idx; | ||
} | ||
|
||
// set size range to the right page | ||
mi_assert(start <= idx); | ||
for (size_t sz = start; sz <= idx; sz++) { | ||
pages_free[sz] = page; | ||
} | ||
} | ||
|
||
/* | ||
static bool mi_page_queue_is_empty(mi_page_queue_t* queue) { | ||
return (queue->first == NULL); | ||
} | ||
*/ | ||
|
||
static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) { | ||
mi_assert_internal(page != NULL); | ||
mi_assert_expensive(mi_page_queue_contains(queue, page)); | ||
mi_assert_internal(page->xblock_size == queue->block_size || (page->xblock_size > MI_MEDIUM_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue))); | ||
mi_heap_t* heap = mi_page_heap(page); | ||
|
||
if (page->prev != NULL) page->prev->next = page->next; | ||
if (page->next != NULL) page->next->prev = page->prev; | ||
if (page == queue->last) queue->last = page->prev; | ||
if (page == queue->first) { | ||
queue->first = page->next; | ||
// update first | ||
mi_assert_internal(mi_heap_contains_queue(heap, queue)); | ||
mi_heap_queue_first_update(heap,queue); | ||
} | ||
heap->page_count--; | ||
page->next = NULL; | ||
page->prev = NULL; | ||
// mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), NULL); | ||
mi_page_set_in_full(page,false); | ||
} | ||
|
||
|
||
static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) { | ||
mi_assert_internal(mi_page_heap(page) == heap); | ||
mi_assert_internal(!mi_page_queue_contains(queue, page)); | ||
#if MI_HUGE_PAGE_ABANDON | ||
mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE); | ||
#endif | ||
mi_assert_internal(page->xblock_size == queue->block_size || | ||
(page->xblock_size > MI_MEDIUM_OBJ_SIZE_MAX) || | ||
(mi_page_is_in_full(page) && mi_page_queue_is_full(queue))); | ||
|
||
mi_page_set_in_full(page, mi_page_queue_is_full(queue)); | ||
// mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), heap); | ||
page->next = queue->first; | ||
page->prev = NULL; | ||
if (queue->first != NULL) { | ||
mi_assert_internal(queue->first->prev == NULL); | ||
queue->first->prev = page; | ||
queue->first = page; | ||
} | ||
else { | ||
queue->first = queue->last = page; | ||
} | ||
|
||
// update direct | ||
mi_heap_queue_first_update(heap, queue); | ||
heap->page_count++; | ||
} | ||
|
||
|
||
static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) { | ||
mi_assert_internal(page != NULL); | ||
mi_assert_expensive(mi_page_queue_contains(from, page)); | ||
mi_assert_expensive(!mi_page_queue_contains(to, page)); | ||
|
||
mi_assert_internal((page->xblock_size == to->block_size && page->xblock_size == from->block_size) || | ||
(page->xblock_size == to->block_size && mi_page_queue_is_full(from)) || | ||
(page->xblock_size == from->block_size && mi_page_queue_is_full(to)) || | ||
(page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(to)) || | ||
(page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_full(to))); | ||
|
||
mi_heap_t* heap = mi_page_heap(page); | ||
if (page->prev != NULL) page->prev->next = page->next; | ||
if (page->next != NULL) page->next->prev = page->prev; | ||
if (page == from->last) from->last = page->prev; | ||
if (page == from->first) { | ||
from->first = page->next; | ||
// update first | ||
mi_assert_internal(mi_heap_contains_queue(heap, from)); | ||
mi_heap_queue_first_update(heap, from); | ||
} | ||
|
||
page->prev = to->last; | ||
page->next = NULL; | ||
if (to->last != NULL) { | ||
mi_assert_internal(heap == mi_page_heap(to->last)); | ||
to->last->next = page; | ||
to->last = page; | ||
} | ||
else { | ||
to->first = page; | ||
to->last = page; | ||
mi_heap_queue_first_update(heap, to); | ||
} | ||
|
||
mi_page_set_in_full(page, mi_page_queue_is_full(to)); | ||
} | ||
|
||
// Only called from `mi_heap_absorb`. | ||
size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) { | ||
mi_assert_internal(mi_heap_contains_queue(heap,pq)); | ||
mi_assert_internal(pq->block_size == append->block_size); | ||
|
||
if (append->first==NULL) return 0; | ||
|
||
// set append pages to new heap and count | ||
size_t count = 0; | ||
for (mi_page_t* page = append->first; page != NULL; page = page->next) { | ||
// inline `mi_page_set_heap` to avoid a wrong assertion during absorption; | ||
// in this case it is ok to have delayed freeing since both the "to" and "from" heaps are still alive. | ||
mi_atomic_store_release(&page->xheap, (uintptr_t)heap); | ||
// set the flag to delayed free (not overriding NEVER_DELAYED_FREE) which has as a | ||
// side effect that it spins until any DELAYED_FREEING is finished. This ensures | ||
// that after appending only the new heap will be used for delayed free operations. | ||
_mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false); | ||
count++; | ||
} | ||
|
||
if (pq->last==NULL) { | ||
// take over afresh | ||
mi_assert_internal(pq->first==NULL); | ||
pq->first = append->first; | ||
pq->last = append->last; | ||
mi_heap_queue_first_update(heap, pq); | ||
} | ||
else { | ||
// append to end | ||
mi_assert_internal(pq->last!=NULL); | ||
mi_assert_internal(append->first!=NULL); | ||
pq->last->next = append->first; | ||
append->first->prev = pq->last; | ||
pq->last = append->last; | ||
} | ||
return count; | ||
} |