Customize grids for CPHF (pyscf#244)
* add cphf_grids to rks

* update example

* build grids with mol

* unit test && merge master

* unit test

* unit test for TDDFT

* pass grids to response function
wxj6000 authored Nov 13, 2024
1 parent ec72d46 commit 867c5b8
Showing 11 changed files with 62 additions and 15 deletions.
1 change: 1 addition & 0 deletions examples/00-h2o.py
@@ -60,6 +60,7 @@
# Compute Hessian
h = mf_GPU.Hessian()
h.auxbasis_response = 2 # 0: no aux contribution, 1: some contributions, 2: all
mf_GPU.cphf_grids.atom_grid = (50,194) # customize grids for solving CPSCF equation, SG1 by default
h_dft = h.kernel()

# harmonic analysis
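
For reference, a minimal usage sketch of the new option, following the pattern of examples/00-h2o.py above. It assumes the usual gpu4pyscf RKS construction (rks.RKS(mol, xc=...)); the geometry, basis, and functional are illustrative, while auxbasis_response and the (50, 194) CPHF grid come from the diff itself:

    import pyscf
    from gpu4pyscf.dft import rks

    # Illustrative molecule and settings; any gpu4pyscf DFT object works the same way.
    mol = pyscf.M(atom='O 0 0 0; H 0 0.757 0.587; H 0 -0.757 0.587',
                  basis='def2-tzvpp')
    mf_GPU = rks.RKS(mol, xc='B3LYP').density_fit()
    mf_GPU.kernel()

    h = mf_GPU.Hessian()
    h.auxbasis_response = 2                   # 0: no aux contribution, 1: some, 2: all
    mf_GPU.cphf_grids.atom_grid = (50, 194)   # grids for the CPSCF/CPHF equations (SG1-pruned by default)
    h_dft = h.kernel()

A coarser CPHF grid only affects how the coupled-perturbed equations are solved, so it can speed up the Hessian without changing the SCF result.
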
5 changes: 4 additions & 1 deletion gpu4pyscf/df/hessian/tests/test_df_uks_hessian.py
@@ -83,6 +83,7 @@ def test_df_gga(self):

mf = mf.to_gpu()
hessobj = mf.Hessian()
hessobj.base.cphf_grids = hessobj.base.grids
hess_gpu = hessobj.kernel()
assert numpy.linalg.norm(hess_cpu - hess_gpu) < 1e-5

@@ -98,9 +99,11 @@ def test_df_mgga(self):

mf = mf.to_gpu()
hessobj = mf.Hessian()
hessobj.base.cphf_grids = hessobj.base.grids
hess_gpu = hessobj.kernel()
assert numpy.linalg.norm(hess_cpu - hess_gpu) < 1e-5

if __name__ == "__main__":
print("Full Tests for DF UKS Hessian")
unittest.main()
unittest.main()

9 changes: 9 additions & 0 deletions gpu4pyscf/dft/rks.py
@@ -253,6 +253,14 @@ def __init__(self, xc='LDA,VWN'):
self.nlcgrids = gen_grid.Grids(self.mol)
self.nlcgrids.level = getattr(
__config__, 'dft_rks_RKS_nlcgrids_level', self.nlcgrids.level)

# Default CPHF grids are SG1 grids
# Reference:
# https://gaussian.com/integral/?tabid=1#Integral_keyword__Grid_option
self.cphf_grids = gen_grid.Grids(self.mol)
self.cphf_grids.prune = gen_grid.sg1_prune
self.cphf_grids.atom_grid = (50,194)

# Use rho to filter grids
self.small_rho_cutoff = getattr(
__config__, 'dft_rks_RKS_small_rho_cutoff', 1e-7)
@@ -293,6 +301,7 @@ def reset(self, mol=None):
hf.SCF.reset(self, mol)
self.grids.reset(mol)
self.nlcgrids.reset(mol)
self.cphf_grids.reset(mol)
self._numint.reset()
return self

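
The default added here is an SG1-pruned (50, 194) atom grid, kept separate from the SCF grids. As a standalone sketch (using pyscf.dft.gen_grid, which the gen_grid module referenced above mirrors), the equivalent construction looks roughly like:

    from pyscf import gto
    from pyscf.dft import gen_grid

    def default_cphf_grids(mol):
        # Same settings as the new self.cphf_grids default:
        # SG1 pruning with 50 radial and 194 angular points per atom.
        grids = gen_grid.Grids(mol)
        grids.prune = gen_grid.sg1_prune
        grids.atom_grid = (50, 194)
        return grids

    mol = gto.M(atom='O 0 0 0; H 0 0.757 0.587; H 0 -0.757 0.587', basis='def2-svp')
    cphf_grids = default_cphf_grids(mol).build()

Assigning mf.cphf_grids = mf.grids (as the updated tests do) reproduces the previous behavior of solving CPHF on the SCF grids.
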
1 change: 1 addition & 0 deletions gpu4pyscf/dft/uks.py
@@ -133,6 +133,7 @@ def reset(self, mol=None):
hf.SCF.reset(self, mol)
self.grids.reset(mol)
self.nlcgrids.reset(mol)
self.cphf_grids.reset(mol)
self._numint.reset()
return self

3 changes: 2 additions & 1 deletion gpu4pyscf/hessian/rhf.py
@@ -562,7 +562,8 @@ def gen_vind(mf, mo_coeff, mo_occ):
mocc = mo_coeff[:,mo_occ>0]
nocc = mocc.shape[1]
mocc_2 = mocc * 2
vresp = mf.gen_response(mo_coeff, mo_occ, hermi=1)
grids = getattr(mf, 'cphf_grids', None)
vresp = mf.gen_response(mo_coeff, mo_occ, hermi=1, grids=grids)

def fx(mo1):
mo1 = cupy.asarray(mo1)
4 changes: 3 additions & 1 deletion gpu4pyscf/hessian/tests/test_rks_hessian.py
@@ -70,7 +70,9 @@ def _check_vxc(method, xc='LDA'):
def _vs_cpu(mf, tol=1e-7):
mf.conv_tol_cpscf = 1e-8
ref = mf.Hessian().kernel()
e2_gpu = mf.Hessian().to_gpu().kernel()
hessobj = mf.Hessian().to_gpu()
hessobj.base.cphf_grids = hessobj.base.grids
e2_gpu = hessobj.kernel()
assert abs(ref - e2_gpu).max() < tol

class KnownValues(unittest.TestCase):
4 changes: 3 additions & 1 deletion gpu4pyscf/hessian/tests/test_uks_hessian.py
@@ -81,7 +81,9 @@ def _check_vxc(method, xc='LDA'):
def _vs_cpu(mf, tol=1e-7):
mf.conv_tol_cpscf = 1e-8
ref = mf.Hessian().kernel()
e2_gpu = mf.Hessian().to_gpu().kernel()
hessobj = mf.Hessian().to_gpu()
hessobj.base.cphf_grids = hessobj.base.grids
e2_gpu = hessobj.kernel()
assert abs(ref - e2_gpu).max() < tol

class KnownValues(unittest.TestCase):
3 changes: 2 additions & 1 deletion gpu4pyscf/hessian/uhf.py
@@ -324,7 +324,8 @@ def gen_vind(mf, mo_coeff, mo_occ):
moccb = mo_coeff[1][:,mo_occ[1]>0]
nocca = mocca.shape[1]
noccb = moccb.shape[1]
vresp = mf.gen_response(mo_coeff, mo_occ, hermi=1)
grids = getattr(mf, 'cphf_grids', None)
vresp = mf.gen_response(mo_coeff, mo_occ, hermi=1, grids=grids)

def fx(mo1):
mo1 = cupy.asarray(mo1)
25 changes: 17 additions & 8 deletions gpu4pyscf/scf/_response_functions.py
@@ -19,7 +19,7 @@
from gpu4pyscf.scf import hf, uhf

def _gen_rhf_response(mf, mo_coeff=None, mo_occ=None,
singlet=None, hermi=0, max_memory=None):
singlet=None, hermi=0, grids=None, max_memory=None):
'''Generate a function to compute the product of RHF response function and
RHF density matrices.
@@ -31,7 +31,12 @@ def _gen_rhf_response(mf, mo_coeff=None, mo_occ=None,
if mo_coeff is None: mo_coeff = mf.mo_coeff
if mo_occ is None: mo_occ = mf.mo_occ
mol = mf.mol

if isinstance(mf, hf.KohnShamDFT):
if grids is None:
grids = mf.grids
if grids and grids.coords is None:
grids.build(mol=mol, with_non0tab=False, sort_grids=True)
ni = mf._numint
ni.libxc.test_deriv_order(mf.xc, 2, raise_error=True)
if getattr(mf, 'nlc', '') != '':
@@ -51,7 +56,7 @@ def _gen_rhf_response(mf, mo_coeff=None, mo_occ=None,
else:
spin = 1
rho0, vxc, fxc = ni.cache_xc_kernel(
mol, mf.grids, mf.xc, mo_coeff, mo_occ, spin, max_memory=max_memory)
mol, grids, mf.xc, mo_coeff, mo_occ, spin, max_memory=max_memory)
dm0 = None

if singlet is None:
@@ -61,7 +66,7 @@ def vind(dm1):
if hermi == 2:
v1 = cupy.zeros_like(dm1)
else:
v1 = ni.nr_rks_fxc(mol, mf.grids, mf.xc, dm0, dm1, 0, hermi,
v1 = ni.nr_rks_fxc(mol, grids, mf.xc, dm0, dm1, 0, hermi,
rho0, vxc, fxc, max_memory=max_memory)
if hybrid:
if hermi != 2:
@@ -83,7 +88,7 @@ def vind(dm1):
v1 = cupy.zeros_like(dm1)
else:
# nr_rks_fxc_st requires alpha of dm1, dm1*.5 should be scaled
v1 = ni.nr_rks_fxc_st(mol, mf.grids, mf.xc, dm0, dm1, 0, True,
v1 = ni.nr_rks_fxc_st(mol, grids, mf.xc, dm0, dm1, 0, True,
rho0, vxc, fxc, max_memory=max_memory)
if hybrid:
if hermi != 2:
@@ -105,7 +110,7 @@ def vind(dm1):
v1 = cupy.zeros_like(dm1)
else:
# nr_rks_fxc_st requires alpha of dm1, dm1*.5 should be scaled
v1 = ni.nr_rks_fxc_st(mol, mf.grids, mf.xc, dm0, dm1, 0, False,
v1 = ni.nr_rks_fxc_st(mol, grids, mf.xc, dm0, dm1, 0, False,
rho0, vxc, fxc, max_memory=max_memory)
if hybrid:
vk = mf.get_k(mol, dm1, hermi=hermi)
@@ -128,7 +133,7 @@ def vind(dm1):


def _gen_uhf_response(mf, mo_coeff=None, mo_occ=None,
with_j=True, hermi=0, max_memory=None):
with_j=True, hermi=0, grids=None, max_memory=None):
'''Generate a function to compute the product of UHF response function and
UHF density matrices.
'''
@@ -137,6 +142,10 @@ def _gen_uhf_response(mf, mo_coeff=None, mo_occ=None,
if mo_occ is None: mo_occ = mf.mo_occ
mol = mf.mol
if isinstance(mf, hf.KohnShamDFT):
if grids is None:
grids = mf.grids
if grids and grids.coords is None:
grids.build(mol=mol, with_non0tab=False, sort_grids=True)
ni = mf._numint
ni.libxc.test_deriv_order(mf.xc, 2, raise_error=True)
if mf.do_nlc():
@@ -146,7 +155,7 @@ def _gen_uhf_response(mf, mo_coeff=None, mo_occ=None,
omega, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, mol.spin)
hybrid = ni.libxc.is_hybrid_xc(mf.xc)

rho0, vxc, fxc = ni.cache_xc_kernel(mol, mf.grids, mf.xc,
rho0, vxc, fxc = ni.cache_xc_kernel(mol, grids, mf.xc,
mo_coeff, mo_occ, 1)
dm0 = None

@@ -158,7 +167,7 @@ def vind(dm1):
if hermi == 2:
v1 = cupy.zeros_like(dm1)
else:
v1 = ni.nr_uks_fxc(mol, mf.grids, mf.xc, dm0, dm1, 0, hermi,
v1 = ni.nr_uks_fxc(mol, grids, mf.xc, dm0, dm1, 0, hermi,
rho0, vxc, fxc, max_memory=max_memory)
if not hybrid:
if with_j:
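
Taken together, these edits give the response function a simple precedence for the XC-kernel grids: an explicitly passed grids argument (for instance cphf_grids forwarded from the Hessian's gen_vind) wins, otherwise the SCF grids are used, and the chosen grids are built lazily on first use. A condensed sketch of that selection (the helper name is illustrative, not part of the patch):

    def _pick_response_grids(mf, grids=None):
        # Prefer the grids passed by the caller (e.g. mf.cphf_grids forwarded
        # by gen_vind in gpu4pyscf/hessian/rhf.py and uhf.py).
        if grids is None:
            grids = mf.grids
        # Build lazily, mirroring the build call added in the diff above.
        if grids.coords is None:
            grids.build(mol=mf.mol, with_non0tab=False, sort_grids=True)
        return grids

With grids=None (the default, and what objects without a cphf_grids attribute end up passing), the response function falls back to the SCF grids, i.e. the behavior before this commit.
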
11 changes: 10 additions & 1 deletion gpu4pyscf/tdscf/tests/test_tdrks.py
@@ -39,19 +39,25 @@ def setUpClass(cls):
mf_lda = mol.RKS().to_gpu().density_fit()
mf_lda.xc = 'lda, vwn'
mf_lda.grids.prune = None
mf_lda.cphf_grids = mf_lda.grids
cls.mf_lda = mf_lda.run(conv_tol=1e-10)

mf_bp86 = mol.RKS().to_gpu().density_fit()
mf_bp86.xc = 'b88,p86'
mf_bp86.grids.prune = None
mf_bp86.cphf_grids = mf_bp86.grids
cls.mf_bp86 = mf_bp86.run(conv_tol=1e-10)

mf_b3lyp = mol.RKS().to_gpu().density_fit()
mf_b3lyp.xc = 'b3lyp5'
mf_b3lyp.grids.prune = None
mf_b3lyp.cphf_grids = mf_b3lyp.grids
cls.mf_b3lyp = mf_b3lyp.run(conv_tol=1e-10)

cls.mf_m06l = mol.RKS().to_gpu().density_fit().run(xc='m06l', conv_tol=1e-10)
mf_m06l = mol.RKS().to_gpu().density_fit()
mf_m06l.xc = 'm06l'
mf_m06l.cphf_grids = mf_m06l.grids
cls.mf_m06l = mf_m06l.run(conv_tol=1e-10)

@classmethod
def tearDownClass(cls):
@@ -106,6 +112,7 @@ def test_tddft_b3lyp(self):
def test_tddft_camb3lyp(self):
mol = self.mol
mf = mol.RKS(xc='camb3lyp').run()
mf.cphf_grids = mf.grids
td = mf.TDDFT().to_gpu()
assert td.device == 'gpu'
td.conv_tol = 1e-5
@@ -119,6 +126,7 @@ def test_tda_b3lypg(self):
mf = mol.RKS()
mf.xc = 'b3lypg'
mf.grids.prune = None
mf.cphf_grids = mf.grids
mf.scf()
td = mf.TDA().to_gpu()
assert td.device == 'gpu'
@@ -172,6 +180,7 @@ def test_tda_rsh(self):
mf = mol.RKS()
mf.xc = 'wb97'
mf.kernel()
mf.cphf_grids = mf.grids
td = mf.TDA().to_gpu()
assert td.device == 'gpu'
e_td = td.set(nstates=5).kernel()[0]
11 changes: 10 additions & 1 deletion gpu4pyscf/tdscf/tests/test_tduks.py
@@ -49,17 +49,22 @@ def setUpClass(cls):

mf_lda = mol.UKS().set(xc='lda', conv_tol=1e-12).to_gpu()
mf_lda.grids.prune = None
mf_lda.cphf_grids = mf_lda.grids
cls.mf_lda = mf_lda.density_fit().run()

mf_bp86 = mol.UKS().set(xc='b88,p86', conv_tol=1e-12).to_gpu()
mf_bp86.grids.prune = None
mf_bp86.cphf_grids = mf_bp86.grids
cls.mf_bp86 = mf_bp86.density_fit().run()

mf_b3lyp = mol.UKS().set(xc='b3lyp5', conv_tol=1e-12).to_gpu()
mf_b3lyp.grids.prune = None
mf_b3lyp.cphf_grids = mf_b3lyp.grids
cls.mf_b3lyp = mf_b3lyp.density_fit().run()

cls.mf_m06l = mol.UKS().to_gpu().density_fit().run(xc='m06l')
mf_m06l = mol.UKS().to_gpu().density_fit().run(xc='m06l')
mf_m06l.cphf_grids = mf_m06l.grids
cls.mf_m06l = mf_m06l

@classmethod
def tearDownClass(cls):
@@ -76,6 +81,7 @@ def test_nohybrid_lda(self):

mol1 = self.mol1
mf = mol1.UKS().run(xc='lda, vwn_rpa').run()
mf.cphf_grids = mf.grids
td = mf.CasidaTDDFT().to_gpu()
assert td.device == 'gpu'
td.nstates = 5
@@ -112,6 +118,7 @@ def test_tddft_b88p86(self):

mol1 = self.mol1
mf = mol1.UKS().run(xc='b88,p86').run()
mf.cphf_grids = mf.grids
td = mf.TDDFT().to_gpu()
assert td.device == 'gpu'
es = td.kernel(nstates=5)[0]
@@ -130,6 +137,7 @@ def test_tddft_b3lyp(self):
def test_tddft_camb3lyp(self):
mol1 = self.mol1
mf = mol1.UKS(xc='camb3lyp').run()
mf.cphf_grids = mf.grids
td = mf.TDDFT().to_gpu()
assert td.device == 'gpu'
es = td.kernel(nstates=4)[0]
@@ -157,6 +165,7 @@ def test_tda_lda(self):

mol1 = self.mol1
mf = mol1.UKS().run(xc='lda,vwn').run()
mf.cphf_grids = mf.grids
td = mf.TDA().to_gpu()
assert td.device == 'gpu'
td.nstates = 5
