Merge branch 'devel' into docs-se_atten
wanghan-iapcm authored May 29, 2024
2 parents 6bc809f + 0afe8bf commit f1dcea8
Showing 6 changed files with 18 additions and 17 deletions.
17 changes: 9 additions & 8 deletions .github/workflows/test_python.yml
@@ -42,17 +42,18 @@ jobs:
- name: Get durations from cache
uses: actions/cache@v4
with:
- path: test_durations
+ path: .test_durations
# the key must never match, even when restarting workflows, as that
# will cause durations to get out of sync between groups, the
# combined durations will be loaded if available
- key: test-durations-split-${{ github.run_id }}-${{ github.run_number}}-${{ matrix.python }}-${{ matrix.group }}
+ key: test2-durations-split-${{ github.run_id }}-${{ github.run_number}}-${{ matrix.python }}-${{ matrix.group }}
restore-keys: |
-   test-durations-combined-${{ matrix.python }}-${{ github.sha }}
-   test-durations-combined-${{ matrix.python }}
- - run: pytest --cov=deepmd source/tests --durations=0 --splits 6 --group ${{ matrix.group }} --store-durations --durations-path=.test_durations_${{ matrix.group }} --splitting-algorithm least_duration
+   test2-durations-combined-${{ matrix.python }}-${{ github.sha }}
+   test2-durations-combined-${{ matrix.python }}
+ - run: pytest --cov=deepmd source/tests --durations=0 --splits 6 --group ${{ matrix.group }} --store-durations --durations-path=.test_durations --splitting-algorithm least_duration
env:
NUM_WORKERS: 0
+ - run: mv .test_durations .test_durations_${{ matrix.group }}
- name: Upload partial durations
uses: actions/upload-artifact@v4
with:
@@ -77,15 +78,15 @@ jobs:
# key won't match during the first run for the given commit, but
# restore-key will if there's a previous stored durations file,
# so cache will both be loaded and stored
- key: test-durations-combined-${{ matrix.python }}-${{ github.sha }}
- restore-keys: test-durations-combined-${{ matrix.python }}
+ key: test2-durations-combined-${{ matrix.python }}-${{ github.sha }}
+ restore-keys: test2-durations-combined-${{ matrix.python }}
- name: Download artifacts
uses: actions/download-artifact@v4
with:
pattern: split-${{ matrix.python }}-*
merge-multiple: true
- name: Combine test durations
- run: jq '. + input' .test_durations_* > .test_durations
+ run: jq -s add .test_durations_* > .test_durations
pass:
name: Pass testing Python
needs: [testpython, update_durations]
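For readers unfamiliar with the jq idiom in the new "Combine test durations" step: `jq -s add` slurps all of the per-group duration files into one array and merges the JSON objects into a single object, whereas the previous `'. + input'` filter only combines inputs two at a time. A rough Python equivalent, given here purely as an illustration (the file names mirror the workflow; this is not code from the commit):

```python
# Illustrative sketch of: jq -s add .test_durations_* > .test_durations
# Each .test_durations_<group> file is a JSON object mapping test ids to runtimes;
# the merged .test_durations is what pytest-split loads on later runs.
import glob
import json

merged = {}
for path in sorted(glob.glob(".test_durations_*")):
    with open(path) as f:
        merged.update(json.load(f))  # later files override duplicate keys, like jq's `add`

with open(".test_durations", "w") as f:
    json.dump(merged, f, indent=2)
```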
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -29,7 +29,7 @@ repos:
exclude: ^source/3rdparty
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
- rev: v0.4.4
+ rev: v0.4.5
hooks:
- id: ruff
args: ["--fix"]
2 changes: 1 addition & 1 deletion deepmd/pt/utils/nlist.py
@@ -326,7 +326,7 @@ def extend_coord_with_ghosts(
# +1: central cell
nbuff = torch.ceil(rcut / to_face).to(torch.long)
# 3
- nbuff = torch.max(nbuff, dim=0, keepdim=False).values
+ nbuff = torch.amax(nbuff, dim=0) # faster than torch.max
nbuff_cpu = nbuff.cpu()
xi = torch.arange(-nbuff_cpu[0], nbuff_cpu[0] + 1, 1, device="cpu")
yi = torch.arange(-nbuff_cpu[1], nbuff_cpu[1] + 1, 1, device="cpu")
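A short illustrative note on the nlist.py change (not part of the commit): `torch.amax(nbuff, dim=0)` returns the same values as `torch.max(nbuff, dim=0, keepdim=False).values`, but it never materializes the argmax indices tensor, which is presumably the "faster" part referenced in the new inline comment.

```python
# Minimal sketch of the equivalence; the tensor values are arbitrary examples.
import torch

nbuff = torch.tensor([[1, 4, 2],
                      [3, 0, 5]])

values_only = torch.amax(nbuff, dim=0)     # tensor([3, 4, 5]); no indices computed
values, indices = torch.max(nbuff, dim=0)  # same values, plus an indices tensor
assert torch.equal(values_only, values)
```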
5 changes: 5 additions & 0 deletions deepmd/utils/batch_size.py
@@ -61,6 +61,11 @@ def __init__(self, initial_batch_size: int = 1024, factor: float = 2.0) -> None:
self.maximum_working_batch_size = initial_batch_size
if self.is_gpu_available():
self.minimal_not_working_batch_size = 2**31
+ log.info(
+     "If you encounter the error 'an illegal memory access was encountered', this may be due to a TensorFlow issue. "
+     "To avoid this, set the environment variable DP_INFER_BATCH_SIZE to a smaller value than the last adjusted batch size. "
+     "The environment variable DP_INFER_BATCH_SIZE controls the inference batch size (nframes * natoms). "
+ )
else:
self.minimal_not_working_batch_size = (
self.maximum_working_batch_size + 1
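The new log message points users to `DP_INFER_BATCH_SIZE`, which, as the message states, caps the inference batch size counted as nframes * natoms. A hedged usage sketch with an example value that is not from this commit:

```python
# Illustrative only: set a fixed, smaller inference batch size before running
# inference if the automatically grown batch size triggers GPU errors such as
# "an illegal memory access was encountered".
import os

os.environ["DP_INFER_BATCH_SIZE"] = "1024"  # example value; shell equivalent: export DP_INFER_BATCH_SIZE=1024
# ...then run inference as usual, e.g. `dp test` or the Python inference API.
```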
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -98,7 +98,7 @@ lmp = [
"lammps~=2023.8.2.3.0",
]
ipi = [
"i-PI",
"ipi",
]
gui = [
"dpgui",
7 changes: 1 addition & 6 deletions source/tests/pt/model/test_polarizability_fitting.py
@@ -269,12 +269,7 @@ def test_permu(self):

def test_trans(self):
atype = self.atype.reshape(1, 5)
- coord_s = torch.matmul(
-     torch.remainder(
-         torch.matmul(self.coord + self.shift, torch.linalg.inv(self.cell)), 1.0
-     ),
-     self.cell,
- )
+ coord_s = self.coord + self.shift
for fit_diag, scale in itertools.product([True, False], [None, self.scale]):
ft0 = PolarFittingNet(
self.nt,
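The last change simplifies the translation test: the shifted coordinates are passed to the model directly rather than being wrapped back into the periodic cell, relying on the fact that the predicted polarizability should be invariant under a rigid translation. A schematic sketch of such a check (function and variable names are assumed, not the actual test API):

```python
# Schematic only; `model` stands for any fitting network taking (coord, atype, cell).
import torch

def check_translation_invariance(model, coord, atype, cell, shift):
    ref = model(coord, atype, cell)
    shifted = model(coord + shift, atype, cell)  # plain shift, no wrapping into the cell
    torch.testing.assert_close(ref, shifted)
```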
