diff --git a/doc/determinism.md b/doc/determinism.md
new file mode 100644
index 0000000000..7d87841f66
--- /dev/null
+++ b/doc/determinism.md
@@ -0,0 +1,13 @@
+# Running DeepMD in full deterministic mode
+
+With the default settings, DeepMD does not guarantee that two successive trainings on the same data will return the same model parameters. The results also depend on the processing unit (GPU vs. CPU), and variations may be observed between different families of GPUs. This document explains how to set up DeepMD runs so that they produce reproducible results for a given set of training data and hardware architecture. It only applies to the force and stress calculations during the training and inference phases.
+
+The GPU kernels computing the forces and stress in DeepMD are deterministic. Calls to the TensorFlow API, however, offer no such guarantee unless a set of environment variables affecting its execution is set at runtime, or specific API calls, such as `tf.config.experimental.enable_op_determinism()`, are made during the TensorFlow initialization steps. The most important environment variable is `TF_DETERMINISTIC_OPS`, which selects the deterministic variants of the TensorFlow GPU functions when set to 1. Two other variables controlling the TensorFlow threading, `TF_INTER_OP_PARALLELISM_THREADS` and `TF_INTRA_OP_PARALLELISM_THREADS`, should be set to 0. More information about running TensorFlow in deterministic mode, and what it implies, can be found [here](https://www.tensorflow.org/api_docs/python/tf/config/experimental/enable_op_determinism). The `OMP_NUM_THREADS` variable seems to have little or no impact when the GPU version of DeepMD is used.
+
+Adding these three lines to the run scripts is enough to get reproducible results on the same hardware:
+
+```sh
+export TF_DETERMINISTIC_OPS=1
+export TF_INTER_OP_PARALLELISM_THREADS=0
+export TF_INTRA_OP_PARALLELISM_THREADS=0
+```
diff --git a/source/lib/src/gpu/prod_force.cu b/source/lib/src/gpu/prod_force.cu
index 7b1359b3b0..1bb8a711eb 100644
--- a/source/lib/src/gpu/prod_force.cu
+++ b/source/lib/src/gpu/prod_force.cu
@@ -43,76 +43,117 @@ __global__ void force_deriv_wrt_center_atom(FPTYPE* force,
   }
 }
 
-template <typename FPTYPE>
-__global__ void force_deriv_wrt_neighbors_a(FPTYPE* force,
-                                            const FPTYPE* net_deriv,
-                                            const FPTYPE* in_deriv,
-                                            const int* nlist,
-                                            const int nloc,
-                                            const int nall,
-                                            const int nnei) {
-  // idy -> nnei
-  const int_64 idx = blockIdx.x;
-  const unsigned int idy = blockIdx.y * blockDim.x + threadIdx.x;
-  const unsigned int idz = threadIdx.y;
-  const int ndescrpt = nnei * 4;
-  if (idy >= nnei) {
-    return;
-  }
-  // deriv wrt neighbors
-  int j_idx = nlist[idx * nnei + idy];
-  if (j_idx < 0) {
+template <typename FPTYPE, int shared_memory_block_, bool radial_only_>
+__global__ void force_deriv_wrt_neighbors(FPTYPE* force,
+                                          const FPTYPE* net_deriv,
+                                          const FPTYPE* in_deriv,
+                                          const int* nlist,
+                                          const int nframes,
+                                          const int nloc,
+                                          const int nall,
+                                          const int nnei) {
+  // limited to 2 billion atoms and 2 billion frames
+  const int atom_id = blockIdx.x;
+  const int frame_id = blockIdx.z * gridDim.y + blockIdx.y;
+
+  if (frame_id >= nframes) {
     return;
   }
-  FPTYPE force_tmp = (FPTYPE)0.;
-  for (int idw = 0; idw < 4; ++idw) {
-    force_tmp += net_deriv[idx * ndescrpt + idy * 4 + idw] *
-                 in_deriv[idx * ndescrpt * 3 + (idy * 4 + idw) * 3 + idz];
+
+  const int ndescrpt = nnei * ((radial_only_) ? (1) : (4));
+
+  // define various pointers for a specific frame.
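+  // The index arithmetic below assumes the layouts
+  // net_deriv[nframes][nloc][ndescrpt], in_deriv[nframes][nloc][ndescrpt][3]
+  // and nlist[nframes][nloc][nnei].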
+  const FPTYPE* frame_net_deriv_ = &net_deriv[frame_id * nloc * ndescrpt];
+  const FPTYPE* frame_in_deriv_ = &in_deriv[frame_id * nloc * ndescrpt * 3];
+  const int* frame_neighbor_list_ = &nlist[frame_id * nnei * nloc];
+  FPTYPE force_tmp[3] = {(FPTYPE)0., (FPTYPE)0., (FPTYPE)0.};
+
+  for (int neighbor_id = threadIdx.x; neighbor_id < nnei;
+       neighbor_id += blockDim.x) {
+    // collect all terms $\partial E_j / \partial D_{ji} \nabla_{R_j} D_{ji}$
+    // where the atom i is a neighbor of the atom j.
+    //
+    // Go through all neighbors of atom i, locate the position of
+    // the atom i in the neighbor list of the atom j and retrieve all necessary
+    // information.
+
+    const int atom_j = frame_neighbor_list_[atom_id * nnei + neighbor_id];
+
+    // The neighbors of a given atom are sorted by type, and each resulting
+    // list is separated from the next by a series of -1. More details about
+    // the sorting can be found in https://doi.org/10.1016/j.cpc.2020.107624
+    //
+    // To illustrate this, take the neighbors of a given atom of type a (in a
+    // system with two atom types a and b): DeepMD stores the neighbors as
+    //
+    // [neighbors of type a], -1, -1, -1, ..., [neighbors of type b],
+    // -1, -1, -1, ...
+
+    if (atom_j < 0) {
+      continue;
+    }
+
+    const int* nei_nei_list_ = &frame_neighbor_list_[atom_j * nnei];
+    int atom_id_position = 0;
+
+    // search for the index of the atom i in the neighbor list of the atom j
+    // (atom i is expected to be present in that list)
+    for (atom_id_position = 0; atom_id_position < nnei; atom_id_position++) {
+      if (nei_nei_list_[atom_id_position] == atom_id) {
+        break;
+      }
+    }
+
+    const int offset_j =
+        (atom_j * nnei + atom_id_position) * ((radial_only_) ? (1) : (4));
+    for (int idw = 0; idw < ((radial_only_) ? (1) : (4)); ++idw) {
+      const FPTYPE cst1 = frame_net_deriv_[offset_j + idw];
+      force_tmp[0] += cst1 * frame_in_deriv_[(offset_j + idw) * 3 + 0];
+      force_tmp[1] += cst1 * frame_in_deriv_[(offset_j + idw) * 3 + 1];
+      force_tmp[2] += cst1 * frame_in_deriv_[(offset_j + idw) * 3 + 2];
+    }
   }
-  const int_64 kk = idx / nloc;  // frame index
-  atomicAdd(force + kk * nall * 3 + j_idx * 3 + idz, force_tmp);
-}
 
-template <typename FPTYPE>
-__global__ void force_deriv_wrt_neighbors_r(FPTYPE* force,
-                                            const FPTYPE* net_deriv,
-                                            const FPTYPE* in_deriv,
-                                            const int* nlist,
-                                            const int nloc,
-                                            const int nall,
-                                            const int nnei) {
-  // idy -> nnei
-  const int_64 idx = blockIdx.x;
-  const unsigned int idy = blockIdx.y * blockDim.x + threadIdx.x;
-  const unsigned int idz = threadIdx.y;
-  const int ndescrpt = nnei * 1;
-  if (idy >= nnei) {
-    return;
+  __shared__ FPTYPE fx[shared_memory_block_];
+  __shared__ FPTYPE fy[shared_memory_block_];
+  __shared__ FPTYPE fz[shared_memory_block_];
+
+  fx[threadIdx.x] = force_tmp[0];
+  fy[threadIdx.x] = force_tmp[1];
+  fz[threadIdx.x] = force_tmp[2];
+  __syncthreads();
+
+  // do the final tree reduction in shared memory (blockDim.x must be equal
+  // to shared_memory_block_ and be a power of two)
+  for (int tt = shared_memory_block_ / 2; tt > 0; tt >>= 1) {
+    if (threadIdx.x < tt) {
+      fx[threadIdx.x] += fx[threadIdx.x + tt];
+      fy[threadIdx.x] += fy[threadIdx.x + tt];
+      fz[threadIdx.x] += fz[threadIdx.x + tt];
+    }
+    __syncthreads();
   }
-  // deriv wrt neighbors
-  int j_idx = nlist[idx * nnei + idy];
-  if (j_idx < 0) {
-    return;
+
+  /* Note the sign difference between the formula in the PRL paper and the
+     code: it is due to \nabla_{R_j} D_{ji} = -\nabla_{R_i} D_{ji}. */
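+  // Only thread 0 writes out the reduced force of atom_id: a single writer
+  // per (frame, atom) pair replaces the atomicAdd of the previous
+  // implementation, which fixes the summation order and makes the result
+  // deterministic.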
+  if (threadIdx.x == 0) {
+    const int64_t offset = (frame_id * nall + atom_id) * 3;
+    force[offset] += fx[0];
+    force[offset + 1] += fy[0];
+    force[offset + 2] += fz[0];
   }
-  const int_64 kk = idx / nloc;  // frame index
-  atomicAdd(force + kk * nall * 3 + j_idx * 3 + idz,
-            net_deriv[idx * ndescrpt + idy] *
-                in_deriv[idx * ndescrpt * 3 + idy * 3 + idz]);
 }
 
-namespace deepmd {
-template <typename FPTYPE>
-void prod_force_a_gpu(FPTYPE* force,
-                      const FPTYPE* net_deriv,
-                      const FPTYPE* in_deriv,
-                      const int* nlist,
-                      const int nloc,
-                      const int nall,
-                      const int nnei,
-                      const int nframes) {
+template <typename FPTYPE, bool radial_only_>
+void prod_force_a_r_gpu(FPTYPE* force,
+                        const FPTYPE* net_deriv,
+                        const FPTYPE* in_deriv,
+                        const int* nlist,
+                        const int nloc,
+                        const int nall,
+                        const int nnei,
+                        const int nframes) {
   DPErrcheck(gpuGetLastError());
   DPErrcheck(gpuDeviceSynchronize());
-  const int ndescrpt = nnei * 4;
+  const int ndescrpt = nnei * ((radial_only_) ? (1) : (4));
   DPErrcheck(gpuMemset(force, 0, sizeof(FPTYPE) * nframes * nall * 3));
 
   force_deriv_wrt_center_atom<FPTYPE, TPB><<<nframes * nloc, TPB>>>(
@@ -120,15 +161,29 @@ void prod_force_a_gpu(FPTYPE* force,
   DPErrcheck(gpuGetLastError());
   DPErrcheck(gpuDeviceSynchronize());
 
-  const int LEN = 64;
-  const int nblock = (nnei + LEN - 1) / LEN;
-  dim3 block_grid(nframes * nloc, nblock);
-  dim3 thread_grid(LEN, 3);
-  force_deriv_wrt_neighbors_a<<<block_grid, thread_grid>>>(
-      force, net_deriv, in_deriv, nlist, nloc, nall, nnei);
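+  // The frames are spread over gridDim.y and gridDim.z because each of these
+  // grid dimensions is limited to 65535; the kernel recomputes the frame
+  // index as blockIdx.z * gridDim.y + blockIdx.y and discards the excess
+  // blocks.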
+  const int sqrt_nframes = sqrt(nframes);
+  dim3 block_grid(nloc, sqrt_nframes + 1, sqrt_nframes + 1);
+  // a block of 64 threads to accommodate the AMD GPU wavefront size
+  dim3 thread_grid(64, 1, 1);
+  force_deriv_wrt_neighbors<FPTYPE, 64, radial_only_>
+      <<<block_grid, thread_grid>>>(force, net_deriv, in_deriv, nlist, nframes,
+                                    nloc, nall, nnei);
   DPErrcheck(gpuGetLastError());
   DPErrcheck(gpuDeviceSynchronize());
 }
 
+namespace deepmd {
+template <typename FPTYPE>
+void prod_force_a_gpu(FPTYPE* force,
+                      const FPTYPE* net_deriv,
+                      const FPTYPE* in_deriv,
+                      const int* nlist,
+                      const int nloc,
+                      const int nall,
+                      const int nnei,
+                      const int nframes) {
+  prod_force_a_r_gpu<FPTYPE, false>(force, net_deriv, in_deriv, nlist, nloc,
+                                    nall, nnei, nframes);
+}
 
 template <typename FPTYPE>
 void prod_force_r_gpu(FPTYPE* force,
@@ -139,24 +194,8 @@ void prod_force_r_gpu(FPTYPE* force,
                       const int nall,
                       const int nnei,
                       const int nframes) {
-  DPErrcheck(gpuGetLastError());
-  DPErrcheck(gpuDeviceSynchronize());
-  const int ndescrpt = nnei * 1;
-  DPErrcheck(gpuMemset(force, 0, sizeof(FPTYPE) * nframes * nall * 3));
-
-  force_deriv_wrt_center_atom<FPTYPE, TPB><<<nframes * nloc, TPB>>>(
-      force, net_deriv, in_deriv, ndescrpt, nloc, nall);
-  DPErrcheck(gpuGetLastError());
-  DPErrcheck(gpuDeviceSynchronize());
-
-  const int LEN = 64;
-  const int nblock = (nnei + LEN - 1) / LEN;
-  dim3 block_grid(nframes * nloc, nblock);
-  dim3 thread_grid(LEN, 3);
-  force_deriv_wrt_neighbors_r<<<block_grid, thread_grid>>>(
-      force, net_deriv, in_deriv, nlist, nloc, nall, nnei);
-  DPErrcheck(gpuGetLastError());
-  DPErrcheck(gpuDeviceSynchronize());
+  prod_force_a_r_gpu<FPTYPE, true>(force, net_deriv, in_deriv, nlist, nloc,
+                                   nall, nnei, nframes);
 }
 template void prod_force_a_gpu<float>(float* force,
diff --git a/source/lib/src/gpu/prod_virial.cu b/source/lib/src/gpu/prod_virial.cu
index ab9c5326e3..42606e7374 100644
--- a/source/lib/src/gpu/prod_virial.cu
+++ b/source/lib/src/gpu/prod_virial.cu
@@ -26,112 +26,140 @@ __global__ void atom_virial_reduction(FPTYPE* virial,
   }
 }
 
-template <typename FPTYPE>
-__global__ void virial_deriv_wrt_neighbors_a(FPTYPE* virial,
-                                             FPTYPE* atom_virial,
-                                             const FPTYPE* net_deriv,
-                                             const FPTYPE* in_deriv,
-                                             const FPTYPE* rij,
-                                             const int* nlist,
-                                             const int nloc,
-                                             const int nnei) {
-  // idx -> nloc
-  // idy -> nnei
-  // idz = dd0 * 3 + dd1
-  // dd0 = idz / 3
-  // dd1 = idz % 3
-  const int_64 idx = blockIdx.x;
-  const unsigned int idy = blockIdx.y * blockDim.x + threadIdx.x;
-  const unsigned int idz = threadIdx.y;
-  const int ndescrpt = nnei * 4;
-  if (idy >= nnei) {
-    return;
-  }
-  int j_idx = nlist[idx * nnei + idy];
-  if (j_idx < 0) {
-    return;
-  }
-  // atomicAdd(
-  //     virial + idz,
-  //     net_deriv[idx * ndescrpt + idy * 4 + idw] * rij[idx * nnei * 3 + idy * 3
-  //     + idz / 3] * in_deriv[idx * ndescrpt * 3 + (idy * 4 + idw) * 3 + idz %
-  //     3]);
-  FPTYPE virial_tmp = (FPTYPE)0.;
-  for (int idw = 0; idw < 4; ++idw) {
-    virial_tmp += net_deriv[idx * ndescrpt + idy * 4 + idw] *
-                  rij[idx * nnei * 3 + idy * 3 + idz % 3] *
-                  in_deriv[idx * ndescrpt * 3 + (idy * 4 + idw) * 3 + idz / 3];
+template <typename FPTYPE, int shared_mem_block_size_, bool radial_only_>
+__global__ void virial_deriv_wrt_neighbors(FPTYPE* virial,
+                                           FPTYPE* atom_virial,
+                                           const FPTYPE* net_deriv,
+                                           const FPTYPE* in_deriv,
+                                           const FPTYPE* rij,
+                                           const int* nlist,
+                                           const int nloc,
+                                           const int nnei) {
+  const int atom_i = blockIdx.x;
+  const int frame_id = 0;  // the virial kernels operate on a single frame
+  const int ndescrpt = nnei * ((radial_only_) ? (1) : (4));
+
+  // define various pointers for a specific frame.
+  const FPTYPE* frame_net_deriv_ = &net_deriv[frame_id * nloc * ndescrpt];
+  const FPTYPE* frame_in_deriv_ = &in_deriv[frame_id * nloc * ndescrpt * 3];
+  const int* frame_neighbor_list_ = &nlist[frame_id * nnei * nloc];
+
+  FPTYPE virial_tmp[9];
+  memset(virial_tmp, 0, sizeof(FPTYPE) * 9);
+
+  for (int neighbor_id = threadIdx.x; neighbor_id < nnei;
+       neighbor_id += blockDim.x) {
+    // Go through all neighbors of atom i, locate the position of the atom i in
+    // the neighbor list of the atom j and retrieve all necessary information.
+
+    const int atom_j = frame_neighbor_list_[atom_i * nnei + neighbor_id];
+
+    if (atom_j < 0) {
+      continue;
+    }
+
+    const int* nei_nei_list_ = &frame_neighbor_list_[atom_j * nnei];
+    int atom_id_position = 0;
+    /* search for the position of the atom i in the neighbor list of atom_j */
+    for (atom_id_position = 0; atom_id_position < nnei; atom_id_position++) {
+      if (nei_nei_list_[atom_id_position] == atom_i) {
+        break;
+      }
+    }
+
+    /* take the rij stored in the neighbor list of atom j (the entry pointing
+       back at atom i), NOT the one from the list of atom i, as the coordinate
+       system is locally dependent */
+    const FPTYPE rij_[3] = {rij[(atom_j * nnei + atom_id_position) * 3],
+                            rij[(atom_j * nnei + atom_id_position) * 3 + 1],
+                            rij[(atom_j * nnei + atom_id_position) * 3 + 2]};
+    const int64_t offset_j =
+        (atom_j * nnei + atom_id_position) * ((radial_only_) ? (1) : (4));
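+    // accumulate the nine components of the atomic virial:
+    // virial_tmp[3 * a + b] += (dE / dD) * rij[b] * (dD / dR)[a]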
+    for (int idw = 0; idw < ((radial_only_) ? (1) : (4)); idw++) {
+      const FPTYPE cnd = frame_net_deriv_[offset_j + idw];
+      const FPTYPE in_der[3] = {frame_in_deriv_[(offset_j + idw) * 3 + 0],
+                                frame_in_deriv_[(offset_j + idw) * 3 + 1],
+                                frame_in_deriv_[(offset_j + idw) * 3 + 2]};
+
+      virial_tmp[0] += cnd * rij_[0] * in_der[0];
+      virial_tmp[1] += cnd * rij_[1] * in_der[0];
+      virial_tmp[2] += cnd * rij_[2] * in_der[0];
+      virial_tmp[3] += cnd * rij_[0] * in_der[1];
+      virial_tmp[4] += cnd * rij_[1] * in_der[1];
+      virial_tmp[5] += cnd * rij_[2] * in_der[1];
+      virial_tmp[6] += cnd * rij_[0] * in_der[2];
+      virial_tmp[7] += cnd * rij_[1] * in_der[2];
+      virial_tmp[8] += cnd * rij_[2] * in_der[2];
+    }
   }
-  atomicAdd(atom_virial + j_idx * 9 + idz, virial_tmp);
-}
 
-template <typename FPTYPE>
-__global__ void virial_deriv_wrt_neighbors_r(FPTYPE* virial,
-                                             FPTYPE* atom_virial,
-                                             const FPTYPE* net_deriv,
-                                             const FPTYPE* in_deriv,
-                                             const FPTYPE* rij,
-                                             const int* nlist,
-                                             const int nloc,
-                                             const int nnei) {
-  // idx -> nloc
-  // idy -> nnei
-  // idz = dd0 * 3 + dd1
-  // dd0 = idz / 3
-  // dd1 = idz % 3
-  const int_64 idx = blockIdx.x;
-  const unsigned int idy = blockIdx.y * blockDim.x + threadIdx.x;
-  const unsigned int idz = threadIdx.y;
-  const int ndescrpt = nnei * 1;
-
-  if (idy >= nnei) {
-    return;
+  __syncthreads();
+  __shared__ FPTYPE vab[shared_mem_block_size_];
+
+  /* do the reduction over the thread block, component by component */
+  for (int i = 0; i < 9; i++) {
+    vab[threadIdx.x] = virial_tmp[i];
+    __syncthreads();
+
+    for (int tt = blockDim.x / 2; tt > 0; tt >>= 1) {
+      if (threadIdx.x < tt) {
+        vab[threadIdx.x] += vab[threadIdx.x + tt];
+      }
+      __syncthreads();
+    }
+    virial_tmp[i] = vab[0];
+    // make sure every thread has read vab[0] before it is overwritten in the
+    // next iteration
+    __syncthreads();
   }
-  int j_idx = nlist[idx * nnei + idy];
-  if (j_idx < 0) {
-    return;
+
+  if (threadIdx.x < 9) {
+    atom_virial[9 * atom_i + threadIdx.x] = virial_tmp[threadIdx.x];
   }
-  // atomicAdd(
-  //     virial + idz,
-  //     net_deriv[idx * ndescrpt + idy * 4 + idw] * rij[idx * nnei * 3 + idy * 3
-  //     + idz / 3] * in_deriv[idx * ndescrpt * 3 + (idy * 4 + idw) * 3 + idz %
-  //     3]);
-  atomicAdd(atom_virial + j_idx * 9 + idz,
-            net_deriv[idx * ndescrpt + idy] *
-                rij[idx * nnei * 3 + idy * 3 + idz % 3] *
-                in_deriv[idx * ndescrpt * 3 + idy * 3 + idz / 3]);
 }
 
-namespace deepmd {
-template <typename FPTYPE>
-void prod_virial_a_gpu(FPTYPE* virial,
-                       FPTYPE* atom_virial,
-                       const FPTYPE* net_deriv,
-                       const FPTYPE* in_deriv,
-                       const FPTYPE* rij,
-                       const int* nlist,
-                       const int nloc,
-                       const int nall,
-                       const int nnei) {
+template <typename FPTYPE, bool radial_only_>
+void prod_virial_a_r_gpu(FPTYPE* virial,
+                         FPTYPE* atom_virial,
+                         const FPTYPE* net_deriv,
+                         const FPTYPE* in_deriv,
+                         const FPTYPE* rij,
+                         const int* nlist,
+                         const int nloc,
+                         const int nall,
+                         const int nnei) {
   DPErrcheck(gpuGetLastError());
   DPErrcheck(gpuDeviceSynchronize());
   DPErrcheck(gpuMemset(virial, 0, sizeof(FPTYPE) * 9));
   DPErrcheck(gpuMemset(atom_virial, 0, sizeof(FPTYPE) * 9 * nall));
 
-  const int LEN = 16;
-  int nblock = (nnei + LEN - 1) / LEN;
-  dim3 block_grid(nloc, nblock);
-  dim3 thread_grid(LEN, 9);
-  // compute virial of a frame
-  virial_deriv_wrt_neighbors_a<<<block_grid, thread_grid>>>(
-      virial, atom_virial, net_deriv, in_deriv, rij, nlist, nloc, nnei);
+  dim3 block_grid = dim3(nloc, 1, 1);
+  // a block of 64 threads to accommodate the AMD warp size
+  dim3 thread_grid = dim3(64, 1, 1);
+  virial_deriv_wrt_neighbors<FPTYPE, 64, radial_only_>
+      <<<block_grid, thread_grid>>>(virial, atom_virial, net_deriv, in_deriv,
+                                    rij, nlist, nloc, nnei);
   DPErrcheck(gpuGetLastError());
   DPErrcheck(gpuDeviceSynchronize());
+
   // reduction atom_virial to virial
   atom_virial_reduction<<<9, TPB>>>(virial, atom_virial, nall);
   DPErrcheck(gpuGetLastError());
   DPErrcheck(gpuDeviceSynchronize());
 }
 
+namespace deepmd {
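+// Thin public wrappers: the "a" variant (angular + radial descriptors)
+// instantiates the shared implementation with radial_only_ = false, the "r"
+// variant (radial descriptors only) with radial_only_ = true.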
+template <typename FPTYPE>
+void prod_virial_a_gpu(FPTYPE* virial,
+                       FPTYPE* atom_virial,
+                       const FPTYPE* net_deriv,
+                       const FPTYPE* in_deriv,
+                       const FPTYPE* rij,
+                       const int* nlist,
+                       const int nloc,
+                       const int nall,
+                       const int nnei) {
+  prod_virial_a_r_gpu<FPTYPE, false>(virial, atom_virial, net_deriv, in_deriv,
+                                     rij, nlist, nloc, nall, nnei);
+}
 
 template <typename FPTYPE>
 void prod_virial_r_gpu(FPTYPE* virial,
@@ -143,24 +171,8 @@ void prod_virial_r_gpu(FPTYPE* virial,
                        const int nloc,
                        const int nall,
                        const int nnei) {
-  DPErrcheck(gpuGetLastError());
-  DPErrcheck(gpuDeviceSynchronize());
-  DPErrcheck(gpuMemset(virial, 0, sizeof(FPTYPE) * 9));
-  DPErrcheck(gpuMemset(atom_virial, 0, sizeof(FPTYPE) * 9 * nall));
-
-  const int LEN = 16;
-  int nblock = (nnei + LEN - 1) / LEN;
-  dim3 block_grid(nloc, nblock);
-  dim3 thread_grid(LEN, 9);
-  // compute virial of a frame
-  virial_deriv_wrt_neighbors_r<<<block_grid, thread_grid>>>(
-      virial, atom_virial, net_deriv, in_deriv, rij, nlist, nloc, nnei);
-  DPErrcheck(gpuGetLastError());
-  DPErrcheck(gpuDeviceSynchronize());
-  // reduction atom_virial to virial
-  atom_virial_reduction<<<9, TPB>>>(virial, atom_virial, nall);
-  DPErrcheck(gpuGetLastError());
-  DPErrcheck(gpuDeviceSynchronize());
+  prod_virial_a_r_gpu<FPTYPE, true>(virial, atom_virial, net_deriv, in_deriv,
+                                    rij, nlist, nloc, nall, nnei);
 }
 template void prod_virial_a_gpu<float>(float* virial,