commit 4d7676a79e (2026-01-08 19:47:32 +03:00)
89 changed files with 62260 additions and 0 deletions

00/INCAR Normal file

@@ -0,0 +1,36 @@
SYSTEM = NEB H2 dissociation on Mo2C(100)
ENCUT = 415
PREC = Normal
ISMEAR = 0
SIGMA = 0.1
EDIFF = 1E-5
EDIFFG = -0.05
NSW = 200
IBRION = 3 # NEB
POTIM = 0.05
IOPT = 1 # Quick-min
LCLIMB = .TRUE. # Climbing image
SPRING = -5.0
IMAGES = 5 # 5 intermediate images
# Electronic relaxation
ALGO = Fast
NELM = 100
NELMIN = 4
LREAL = Auto
# Spin and vdW
ISPIN = 2
MAGMOM = 16*0 32*4 8*1 # C:0, Mo:4, H:1
LUSE_VDW = .TRUE.
IVDW = 11
LASPH = .TRUE.
# Parallelization
NCORES = 24
NPAR = 4
KPAR = 6
# Output
LWAVE = .TRUE.
LCHARG = .TRUE.
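
With IMAGES = 5, VASP's NEB run expects the two endpoints in directories 00 and 06 and the five movable images in 01-05, and the total number of MPI ranks must be divisible by IMAGES. A small sanity-check sketch (illustrative only, not part of the commit):

import os

images = 5                                            # must match IMAGES in the INCAR
expected = [f'{i:02d}' for i in range(images + 2)]    # 00 ... 06: endpoints + 5 movable images
missing = [d for d in expected if not os.path.isdir(d)]
print('missing image directories:', missing or 'none')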

00/KPOINTS Normal file

@@ -0,0 +1,4 @@
Automatic mesh
0
Monkhorst-Pack
2 4 4

00/POSCAR Normal file

@@ -0,0 +1,68 @@
H2 far from Mo2C surface
1.00000000000000
17.7369999999999983 0.0000000000000000 0.0000000000000000
0.0000000000000000 12.0860000000000003 0.0000000000000000
0.0000000000000000 0.0000000000000000 10.4359999999999999
C Mo H
16 32 8
Selective dynamics
Direct
0.0000000000000000 0.1890000000000001 0.1250000000000000 F F F
0.0000000000000000 0.3109999999999999 0.3750000000000000 F F F
0.0000000000000000 0.6890000000000001 0.1250000000000000 F F F
0.0000000000000000 0.8109999999999999 0.3750000000000000 F F F
0.0000000000000000 0.1890000000000001 0.6250000000000000 F F F
0.0000000000000000 0.3109999999999999 0.8750000000000000 F F F
0.0000000000000000 0.6890000000000001 0.6250000000000000 F F F
0.0000000000000000 0.8109999999999999 0.8750000000000000 F F F
0.1320119929516228 0.0593457408891106 0.3747810153898691 T T T
0.1339040209268587 0.4382431092148634 0.1246005476156678 T T T
0.1346014460976624 0.5607842949985916 0.3745985782632549 T T T
0.1318895420043087 0.9396115085588336 0.1252676705134894 T T T
0.1339355733708381 0.0614996876186402 0.8750674941722374 T T T
0.1331797503730110 0.4386650366715458 0.6233685812881605 T T T
0.1342318012489764 0.5598322800816591 0.8735113412793347 T T T
0.1332329912804981 0.9394597661817066 0.6219321527751450 T T T
0.0656989344308485 0.0609999999999999 0.0394999999999968 F F F
0.0678354851440517 0.1890000000000001 0.2894999999999968 F F F
0.0678354851440517 0.3109999999999999 0.0394999999999968 F F F
0.0656989344308485 0.4390000000000001 0.2894999999999968 F F F
0.0656989344308485 0.5609999999999999 0.0394999999999968 F F F
0.0678354851440517 0.6890000000000001 0.2894999999999968 F F F
0.0678354851440517 0.8109999999999999 0.0394999999999968 F F F
0.0656989344308485 0.9390000000000001 0.2894999999999968 F F F
0.0656989344308485 0.0609999999999999 0.5394999999999968 F F F
0.0678354851440517 0.1890000000000001 0.7894999999999968 F F F
0.0678354851440517 0.3109999999999999 0.5394999999999968 F F F
0.0656989344308485 0.4390000000000001 0.7894999999999968 F F F
0.0656989344308485 0.5609999999999999 0.5394999999999968 F F F
0.0678354851440517 0.6890000000000001 0.7894999999999968 F F F
0.0678354851440517 0.8109999999999999 0.5394999999999968 F F F
0.0656989344308485 0.9390000000000001 0.7894999999999968 F F F
0.1899635985462743 0.0715452722381709 0.2056722342975337 T T T
0.1898701472331455 0.1926721066860241 0.4654860691830805 T T T
0.1890518958956355 0.3062937312891991 0.2197815432962685 T T T
0.1906584487652120 0.4229511228223711 0.4545465799678731 T T T
0.1906005809118210 0.5730889081693626 0.2024716224638729 T T T
0.1882343850141806 0.6932841047230595 0.4700914833922938 T T T
0.1899240393588936 0.8093603028954814 0.2175916994709843 T T T
0.1937641502147673 0.9295111734818007 0.4540488978640193 T T T
0.1907908420520039 0.0711821519253457 0.7037129217433928 T T T
0.1867802362246722 0.1982063494207132 0.9667270103755806 T T T
0.1891879480762782 0.3077078845148332 0.7201952360379164 T T T
0.1912388918870658 0.4251711384205735 0.9517544247373603 T T T
0.1966550503624373 0.5724834144836694 0.7011711849035441 T T T
0.1895418326796766 0.6922556995684713 0.9703854872411387 T T T
0.1868396807657923 0.8086011749407832 0.7199966590158274 T T T
0.1915129559134146 0.9256614081688295 0.9541573084958859 T T T
0.2500652879714319 0.8090456263635705 0.0545641575316365 T T T
0.2513392081223993 0.3046808670438062 0.0566045935240033 T T T
0.2479311638705096 0.0586269752411525 0.5320845315351295 T T T
0.2476814460919670 0.6895986355994584 0.8084849185407729 T T T
0.2492806440460436 0.3073980778169251 0.5556385058448301 T T T
0.2477965431497549 0.6936376688880892 0.3079168908739140 T T T
0.2835729538717008 0.5931327070274096 0.6083376880979403 T T T
0.2963206835887032 0.5407372680262799 0.6781487631358020 T T T
# H2 molecule far above surface (z ≈ 0.95)
0.5000000000000000 0.5000000000000000 0.9500000000000000 T T T
0.5000000000000000 0.5000000000000000 0.9300000000000000 T T T

00/POTCAR Normal file

File diff suppressed because it is too large.

01/INCAR Normal file

@@ -0,0 +1,36 @@
SYSTEM = NEB H2 dissociation on Mo2C(100)
ENCUT = 415
PREC = Normal
ISMEAR = 0
SIGMA = 0.1
EDIFF = 1E-5
EDIFFG = -0.05
NSW = 200
IBRION = 3 # NEB
POTIM = 0.05
IOPT = 1 # Quick-min
LCLIMB = .TRUE. # Climbing image
SPRING = -5.0
IMAGES = 5 # 5 intermediate images
# Electronic relaxation
ALGO = Fast
NELM = 100
NELMIN = 4
LREAL = Auto
# Spin and vdW
ISPIN = 2
MAGMOM = 16*0 32*4 8*1 # C:0, Mo:4, H:1
LUSE_VDW = .TRUE.
IVDW = 11
LASPH = .TRUE.
# Parallelization
NCORES = 24
NPAR = 4
KPAR = 6
# Output
LWAVE = .TRUE.
LCHARG = .TRUE.

01/KPOINTS Normal file

@@ -0,0 +1,4 @@
Automatic mesh
0
Monkhorst-Pack
2 4 4

01/POTCAR Normal file

File diff suppressed because it is too large.

02/INCAR Normal file

@@ -0,0 +1,36 @@
SYSTEM = NEB H2 dissociation on Mo2C(100)
ENCUT = 415
PREC = Normal
ISMEAR = 0
SIGMA = 0.1
EDIFF = 1E-5
EDIFFG = -0.05
NSW = 200
IBRION = 3 # NEB
POTIM = 0.05
IOPT = 1 # Quick-min
LCLIMB = .TRUE. # Climbing image
SPRING = -5.0
IMAGES = 5 # 5 intermediate images
# Electronic relaxation
ALGO = Fast
NELM = 100
NELMIN = 4
LREAL = Auto
# Spin and vdW
ISPIN = 2
MAGMOM = 16*0 32*4 8*1 # C:0, Mo:4, H:1
LUSE_VDW = .TRUE.
IVDW = 11
LASPH = .TRUE.
# Parallelization
NCORES = 24
NPAR = 4
KPAR = 6
# Output
LWAVE = .TRUE.
LCHARG = .TRUE.

02/KPOINTS Normal file

@@ -0,0 +1,4 @@
Automatic mesh
0
Monkhorst-Pack
2 4 4

02/POTCAR Normal file

File diff suppressed because it is too large.

03/INCAR Normal file

@@ -0,0 +1,36 @@
SYSTEM = NEB H2 dissociation on Mo2C(100)
ENCUT = 415
PREC = Normal
ISMEAR = 0
SIGMA = 0.1
EDIFF = 1E-5
EDIFFG = -0.05
NSW = 200
IBRION = 3 # NEB
POTIM = 0.05
IOPT = 1 # Quick-min
LCLIMB = .TRUE. # Climbing image
SPRING = -5.0
IMAGES = 5 # 5 intermediate images
# Electronic relaxation
ALGO = Fast
NELM = 100
NELMIN = 4
LREAL = Auto
# Spin and vdW
ISPIN = 2
MAGMOM = 16*0 32*4 8*1 # C:0, Mo:4, H:1
LUSE_VDW = .TRUE.
IVDW = 11
LASPH = .TRUE.
# Parallelization
NCORES = 24
NPAR = 4
KPAR = 6
# Output
LWAVE = .TRUE.
LCHARG = .TRUE.

03/KPOINTS Normal file

@@ -0,0 +1,4 @@
Automatic mesh
0
Monkhorst-Pack
2 4 4

03/POTCAR Normal file

File diff suppressed because it is too large.

04/INCAR Normal file

@@ -0,0 +1,36 @@
SYSTEM = NEB H2 dissociation on Mo2C(100)
ENCUT = 415
PREC = Normal
ISMEAR = 0
SIGMA = 0.1
EDIFF = 1E-5
EDIFFG = -0.05
NSW = 200
IBRION = 3 # NEB
POTIM = 0.05
IOPT = 1 # Quick-min
LCLIMB = .TRUE. # Climbing image
SPRING = -5.0
IMAGES = 5 # 5 intermediate images
# Electronic relaxation
ALGO = Fast
NELM = 100
NELMIN = 4
LREAL = Auto
# Spin and vdW
ISPIN = 2
MAGMOM = 16*0 32*4 8*1 # C:0, Mo:4, H:1
LUSE_VDW = .TRUE.
IVDW = 11
LASPH = .TRUE.
# Parallelization
NCORES = 24
NPAR = 4
KPAR = 6
# Output
LWAVE = .TRUE.
LCHARG = .TRUE.

04/KPOINTS Normal file

@@ -0,0 +1,4 @@
Automatic mesh
0
Monkhorst-Pack
2 4 4

04/POTCAR Normal file

File diff suppressed because it is too large.

05/INCAR Normal file

@@ -0,0 +1,36 @@
SYSTEM = NEB H2 dissociation on Mo2C(100)
ENCUT = 415
PREC = Normal
ISMEAR = 0
SIGMA = 0.1
EDIFF = 1E-5
EDIFFG = -0.05
NSW = 200
IBRION = 3 # NEB
POTIM = 0.05
IOPT = 1 # Quick-min
LCLIMB = .TRUE. # Climbing image
SPRING = -5.0
IMAGES = 5 # 5 intermediate images
# Electronic relaxation
ALGO = Fast
NELM = 100
NELMIN = 4
LREAL = Auto
# Spin and vdW
ISPIN = 2
MAGMOM = 16*0 32*4 8*1 # C:0, Mo:4, H:1
LUSE_VDW = .TRUE.
IVDW = 11
LASPH = .TRUE.
# Parallelization
NCORES = 24
NPAR = 4
KPAR = 6
# Output
LWAVE = .TRUE.
LCHARG = .TRUE.

05/KPOINTS Normal file

@@ -0,0 +1,4 @@
Automatic mesh
0
Monkhorst-Pack
2 4 4

05/POTCAR Normal file

File diff suppressed because it is too large.

06/INCAR Normal file

@@ -0,0 +1,36 @@
SYSTEM = NEB H2 dissociation on Mo2C(100)
ENCUT = 415
PREC = Normal
ISMEAR = 0
SIGMA = 0.1
EDIFF = 1E-5
EDIFFG = -0.05
NSW = 200
IBRION = 3 # NEB
POTIM = 0.05
IOPT = 1 # Quick-min
LCLIMB = .TRUE. # Climbing image
SPRING = -5.0
IMAGES = 5 # 5 intermediate images
# Electronic relaxation
ALGO = Fast
NELM = 100
NELMIN = 4
LREAL = Auto
# Spin and vdW
ISPIN = 2
MAGMOM = 16*0 32*4 8*1 # C:0, Mo:4, H:1
LUSE_VDW = .TRUE.
IVDW = 11
LASPH = .TRUE.
# Parallelization
NCORES = 24
NPAR = 4
KPAR = 6
# Output
LWAVE = .TRUE.
LCHARG = .TRUE.

06/KPOINTS Normal file

@@ -0,0 +1,4 @@
Automatic mesh
0
Monkhorst-Pack
2 4 4

06/POSCAR Normal file

@@ -0,0 +1,68 @@
H2 dissociated on Mo2C surface
1.00000000000000
17.7369999999999983 0.0000000000000000 0.0000000000000000
0.0000000000000000 12.0860000000000003 0.0000000000000000
0.0000000000000000 0.0000000000000000 10.4359999999999999
C Mo H
16 32 8
Selective dynamics
Direct
0.0000000000000000 0.1890000000000001 0.1250000000000000 F F F
0.0000000000000000 0.3109999999999999 0.3750000000000000 F F F
0.0000000000000000 0.6890000000000001 0.1250000000000000 F F F
0.0000000000000000 0.8109999999999999 0.3750000000000000 F F F
0.0000000000000000 0.1890000000000001 0.6250000000000000 F F F
0.0000000000000000 0.3109999999999999 0.8750000000000000 F F F
0.0000000000000000 0.6890000000000001 0.6250000000000000 F F F
0.0000000000000000 0.8109999999999999 0.8750000000000000 F F F
0.1320119929516228 0.0593457408891106 0.3747810153898691 T T T
0.1339040209268587 0.4382431092148634 0.1246005476156678 T T T
0.1346014460976624 0.5607842949985916 0.3745985782632549 T T T
0.1318895420043087 0.9396115085588336 0.1252676705134894 T T T
0.1339355733708381 0.0614996876186402 0.8750674941722374 T T T
0.1331797503730110 0.4386650366715458 0.6233685812881605 T T T
0.1342318012489764 0.5598322800816591 0.8735113412793347 T T T
0.1332329912804981 0.9394597661817066 0.6219321527751450 T T T
0.0656989344308485 0.0609999999999999 0.0394999999999968 F F F
0.0678354851440517 0.1890000000000001 0.2894999999999968 F F F
0.0678354851440517 0.3109999999999999 0.0394999999999968 F F F
0.0656989344308485 0.4390000000000001 0.2894999999999968 F F F
0.0656989344308485 0.5609999999999999 0.0394999999999968 F F F
0.0678354851440517 0.6890000000000001 0.2894999999999968 F F F
0.0678354851440517 0.8109999999999999 0.0394999999999968 F F F
0.0656989344308485 0.9390000000000001 0.2894999999999968 F F F
0.0656989344308485 0.0609999999999999 0.5394999999999968 F F F
0.0678354851440517 0.1890000000000001 0.7894999999999968 F F F
0.0678354851440517 0.3109999999999999 0.5394999999999968 F F F
0.0656989344308485 0.4390000000000001 0.7894999999999968 F F F
0.0656989344308485 0.5609999999999999 0.5394999999999968 F F F
0.0678354851440517 0.6890000000000001 0.7894999999999968 F F F
0.0678354851440517 0.8109999999999999 0.5394999999999968 F F F
0.0656989344308485 0.9390000000000001 0.7894999999999968 F F F
0.1899635985462743 0.0715452722381709 0.2056722342975337 T T T
0.1898701472331455 0.1926721066860241 0.4654860691830805 T T T
0.1890518958956355 0.3062937312891991 0.2197815432962685 T T T
0.1906584487652120 0.4229511228223711 0.4545465799678731 T T T
0.1906005809118210 0.5730889081693626 0.2024716224638729 T T T
0.1882343850141806 0.6932841047230595 0.4700914833922938 T T T
0.1899240393588936 0.8093603028954814 0.2175916994709843 T T T
0.1937641502147673 0.9295111734818007 0.4540488978640193 T T T
0.1907908420520039 0.0711821519253457 0.7037129217433928 T T T
0.1867802362246722 0.1982063494207132 0.9667270103755806 T T T
0.1891879480762782 0.3077078845148332 0.7201952360379164 T T T
0.1912388918870658 0.4251711384205735 0.9517544247373603 T T T
0.1966550503624373 0.5724834144836694 0.7011711849035441 T T T
0.1895418326796766 0.6922556995684713 0.9703854872411387 T T T
0.1868396807657923 0.8086011749407832 0.7199966590158274 T T T
0.1915129559134146 0.9256614081688295 0.9541573084958859 T T T
0.2500652879714319 0.8090456263635705 0.0545641575316365 T T T
0.2513392081223993 0.3046808670438062 0.0566045935240033 T T T
0.2479311638705096 0.0586269752411525 0.5320845315351295 T T T
0.2476814460919670 0.6895986355994584 0.8084849185407729 T T T
0.2492806440460436 0.3073980778169251 0.5556385058448301 T T T
0.2477965431497549 0.6936376688880892 0.3079168908739140 T T T
0.2835729538717008 0.5931327070274096 0.6083376880979403 T T T
0.2963206835887032 0.5407372680262799 0.6781487631358020 T T T
# Dissociated H atoms adsorbed on Mo sites
0.2500000000000000 0.2500000000000000 0.3500000000000000 T T T
0.7500000000000000 0.7500000000000000 0.3500000000000000 T T T
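
The POSCAR files for the intermediate images 01-05 do not appear in this part of the diff; a common way to seed them is to interpolate between 00/POSCAR and 06/POSCAR. A minimal sketch using ASE (using ASE here is an assumption; the VTST nebmake.pl script is an equivalent alternative):

from ase.io import read, write
from ase.neb import NEB

initial = read('00/POSCAR')   # H2 far above the Mo2C(100) slab
final = read('06/POSCAR')     # H atoms dissociated on the surface
images = [initial] + [initial.copy() for _ in range(5)] + [final]
NEB(images).interpolate(mic=True)   # linear interpolation with the minimum-image convention
for i, image in enumerate(images[1:-1], start=1):
    write(f'{i:02d}/POSCAR', image, format='vasp', direct=True)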

06/POTCAR Normal file

File diff suppressed because it is too large.

INCAR Normal file

@@ -0,0 +1,36 @@
SYSTEM = NEB H2 dissociation on Mo2C(100)
ENCUT = 415
PREC = Normal
ISMEAR = 0
SIGMA = 0.1
EDIFF = 1E-5
EDIFFG = -0.05
NSW = 200
IBRION = 3 # NEB
POTIM = 0.05
IOPT = 1 # Quick-min
LCLIMB = .TRUE. # Climbing image
SPRING = -5.0
IMAGES = 5 # 5 intermediate images
# Electronic relaxation
ALGO = Fast
NELM = 100
NELMIN = 4
LREAL = Auto
# Spin and vdW
ISPIN = 2
MAGMOM = 16*0 32*4 8*1 # C:0, Mo:4, H:1
LUSE_VDW = .TRUE.
IVDW = 11
LASPH = .TRUE.
# Parallelization
NCORES = 24
NPAR = 4
KPAR = 6
# Output
LWAVE = .TRUE.
LCHARG = .TRUE.

KPOINTS Normal file

@@ -0,0 +1,4 @@
Automatic mesh
0
Monkhorst-Pack
2 4 4

POTCAR Normal file

File diff suppressed because it is too large.

analyze_neb.py Executable file

@@ -0,0 +1,95 @@
#!/usr/bin/env python3
"""
Analyze NEB results and calculate reaction rate
"""
import numpy as np
import os
import matplotlib.pyplot as plt
from ase.io import read
from ase.neb import NEBTools
def read_neb_energies():
"""Read the final TOTEN energy from each image's OUTCAR (one value per image)"""
energies = []
# Find all image directories (00, 01, ..., NN)
dirs = sorted([d for d in os.listdir('.') if os.path.isdir(d) and d.isdigit()])
for d in dirs:
outcar = os.path.join(d, 'OUTCAR')
if os.path.exists(outcar):
energy = None
with open(outcar, 'r') as f:
for line in f:
# keep only the last 'free  energy   TOTEN' entry (final ionic step)
if 'free' in line and 'TOTEN' in line:
energy = float(line.split()[-2])
if energy is not None:
energies.append(energy)
return np.array(energies)
def calculate_reaction_rate(E_a, T=300, A=1e13):
"""
Calculate reaction rate using Arrhenius equation
k = A * exp(-E_a / (k_B * T))
Parameters:
E_a: activation energy (eV)
T: temperature (K)
A: pre-exponential factor (s^-1)
Returns:
k: reaction rate (s^-1)
"""
k_B = 8.617333262145e-5 # eV/K
k = A * np.exp(-E_a / (k_B * T))
return k
def main():
# Read energies
energies = read_neb_energies()
if len(energies) == 0:
print("No OUTCAR files found!")
return
print(f"Found {len(energies)} images")
print(f"Energies (eV): {energies}")
# Calculate activation energy
E_a = energies.max() - energies[0]
print(f"\nActivation energy E_a = {E_a:.3f} eV")
# Calculate reaction rates at different temperatures
temperatures = [250, 300, 350, 400, 450, 500]
print("\nReaction rates (s^-1):")
for T in temperatures:
k = calculate_reaction_rate(E_a, T)
print(f"T = {T} K: k = {k:.2e} s^-1")
# Plot energy profile
plt.figure(figsize=(10, 6))
plt.plot(range(len(energies)), energies - energies[0], 'o-', linewidth=2)
plt.xlabel('Reaction coordinate (image number)')
plt.ylabel('Energy (eV)')
plt.title(f'NEB Energy Profile (E_a = {E_a:.3f} eV)')
plt.grid(True, alpha=0.3)
plt.savefig('neb_energy_profile.png', dpi=300)
plt.show()
# Save results
with open('neb_results.txt', 'w') as f:
f.write(f"Activation energy: {E_a:.6f} eV\n")
f.write(f"Energy barrier: {energies.max() - energies.min():.6f} eV\n")
f.write(f"Reaction energy: {energies[-1] - energies[0]:.6f} eV\n")
f.write("\nReaction rates:\n")
for T in temperatures:
k = calculate_reaction_rate(E_a, T)
f.write(f"T = {T} K: k = {k:.6e} s^-1\n")
if __name__ == '__main__':
main()
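
As a quick sanity check of calculate_reaction_rate(): with k_B*T ≈ 0.0259 eV at 300 K, an illustrative barrier of E_a = 0.5 eV and A = 1e13 s^-1 gives k ≈ 1e13 * exp(-19.3) ≈ 4e4 s^-1 (made-up numbers, not results of this NEB run):

import math

k_B = 8.617333262145e-5              # Boltzmann constant, eV/K
E_a, T, A = 0.5, 300.0, 1e13         # illustrative values only
k = A * math.exp(-E_a / (k_B * T))   # ~4.0e4 s^-1
print(f'k({T:.0f} K) = {k:.2e} s^-1')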


@@ -0,0 +1,31 @@
# Electrochemistry
The project aims to develop code for processing DFT calculations in order to
calculate the rate constant of heterogeneous electron transfer from the
cathode surface to a redox species in the electrolyte. The project is based
on the theories of Marcus and Landau-Zener and on the concept of quantum capacitance.
Part of the project uses Gerischer's approximations.
To date, the developed code can process DFT output data from VASP and calculate:
1) 2D and 3D STM (scanning tunneling microscopy) images in various approximations of the acceptor orbital
(Tersoff-Hamann, Chen, numerical molecular orbitals calculated using cluster DFT, e.g. O2, IrCl6, Ru(NH3)6)
2) 2D ECSTM (electrochemical scanning tunneling microscopy) images, taking into account various parameters of the system
3) DOS plots
# Citing
If you use the stm/GerischerMarkus.py file to calculate the alignment of energy levels between an electrode and
a redox couple, please cite: Kislenko V.A., Pavlov S.V., Kislenko S.A.,
“Influence of defects in graphene on electron transfer kinetics: The role of the surface electronic structure”,
Electrochimica Acta, 341 (2020), 136011. DOI: 10.1016/j.electacta.2020.136011
If you use this project to generate STM images or spatially resolved HET rate constants, please cite in addition:
Pavlov S.V., Kislenko V.A., Kislenko S.A., “Fast Method for Calculating Spatially Resolved Heterogeneous Electron
Transfer Kinetics and Application for Graphene With Defects”,
J. Phys. Chem. C, 124(33) (2020), 18147-18155. DOI: 10.1021/acs.jpcc.0c05376
# Acknowledgments
1) A part of this project is supported by grant 18-03-00773A of the Russian Foundation for Basic Research
2) We acknowledge QijingZheng (github.com/QijingZheng) for the VaspBandUnfolding project, which is very useful
for processing WAVECAR files


@@ -0,0 +1,39 @@
ElemNum2Name = {1: 'H', 2: 'He', 3: 'Li', 4: 'Be', 5: 'B', 6: 'C', 7: 'N', 8: 'O', 9: 'F', 10: 'Ne',
11: 'Na', 12: 'Mg', 13: 'Al', 14: 'Si', 15: 'P ', 16: 'S', 17: 'Cl', 18: 'Ar', 19: 'K ', 20: 'Ca',
21: 'Sc', 22: 'Ti', 23: 'V ', 24: 'Cr', 25: 'Mn', 26: 'Fe', 27: 'Co', 28: 'Ni', 29: 'Cu', 30: 'Zn',
31: 'Ga', 32: 'Ge', 33: 'As', 34: 'Se', 35: 'Br', 36: 'Kr', 37: 'Rb', 38: 'Sr', 39: 'Y ', 40: 'Zr',
41: 'Nb', 42: 'Mo', 43: 'Tc', 44: 'Ru', 45: 'Rh', 46: 'Pd', 47: 'Ag', 48: 'Cd', 49: 'In', 50: 'Sn',
51: 'Sb', 52: 'Te', 53: 'I ', 54: 'Xe', 55: 'Cs', 56: 'Ba', 57: 'La', 58: 'Ce', 59: 'Pr', 60: 'Nd',
61: 'Pm', 62: 'Sm', 63: 'Eu', 64: 'Gd', 65: 'Tb', 66: 'Dy', 67: 'Ho', 68: 'Er', 69: 'Tm', 70: 'Yb',
71: 'Lu', 72: 'Hf', 73: 'Ta', 74: 'W ', 75: 'Re', 76: 'Os', 77: 'Ir', 78: 'Pt', 79: 'Au', 80: 'Hg',
81: 'Tl', 82: 'Pb', 83: 'Bi', 84: 'Po', 85: 'At', 86: 'Rn', 87: 'Fr', 88: 'Ra', 89: 'Ac', 90: 'Th',
91: 'Pa', 92: 'U ', 93: 'Np', 94: 'Pu', 95: 'Am', 96: 'Cm', 97: 'Bk', 98: 'Cf', 99: 'Es', 100: 'Fm',
101: 'Md', 102: 'No', 103: 'Lr', 104: 'Rf', 105: 'Db', 106: 'Sg', 107: 'Bh', 108: 'Hs', 109: 'Mt',
110: 'Ds', 111: 'Rg', 112: 'Cn', 113: 'Nh', 114: 'Fl', 115: 'Mc', 116: 'Lv', 117: 'Ts', 118: 'Og'}
ElemName2Num = {v: k for k, v in zip(ElemNum2Name.keys(), ElemNum2Name.values())}
Bohr2Angstrom = 0.529177
Angstrom2Bohr = 1 / Bohr2Angstrom
Hartree2eV = 27.2114
eV2Hartree = 1 / Hartree2eV
THz2eV = 4.136e-3
eV2THz = 1 / THz2eV
amu2kg = 1.6605402e-27
PLANCK_CONSTANT = 4.135667662e-15 # Planck's constant in eV*s
BOLTZMANN_CONSTANT = 8.617333262145e-5 # Boltzmann's constant in eV/K
ELEM_CHARGE = 1.60217662e-19 # Elementary charge in Coulombs
BOHR_RADIUS = 1.88973 # TODO check: this is 1/0.529177, i.e. the Angstrom-to-Bohr conversion factor, not the Bohr radius in Angstrom
Bader_radii_Bohr = {'H': 2.88, 'He': 2.5, 'Li': 4.18, 'Be': 4.17, 'B': 3.91, 'C': 3.62, 'N': 3.35, 'O': 3.18,
'F': 3.03, 'Ne': 2.89, 'Na': 4.25, 'Mg': 4.6, 'Al': 4.61, 'Si': 4.45, 'P': 4.23, 'S': 4.07,
'Cl': 3.91, 'Ar': 3.72, 'Li_plus': 1.82, 'Na_plus': 2.47, 'F_minus': 3.49, 'Cl_minus': 4.36}
IDSCRF_radii_Angstrom = {'H': 1.77, 'He': 1.49, 'Li': 2.22, 'Be': 2.19, 'B': 2.38, 'C': 2.22, 'N': 2.05, 'O': 1.87,
'F': 1.89, 'Ne': 1.74}
# Colorblindness-friendly colors from https://github.com/mpetroff/accessible-color-cycles
cbf6 = ["#5790fc", "#f89c20", "#e42536", "#964a8b", "#9c9ca1", "#7a21dd"]
cbf8 = ["#1845fb", "#ff5e02", "#c91f16", "#c849a9", "#adad7d", "#86c8dd", "#578dff", "#656364"]
cbf10 = ["#3f90da", "#ffa90e", "#bd1f01", "#94a4a2", "#832db6", "#a96b59", "#e76300", "#b9ac70", "#717581", "#92dadd"]


@@ -0,0 +1,128 @@
from __future__ import annotations
import numpy as np
from typing import Union, Iterable
from nptyping import NDArray, Shape, Number
class EBS:
"""
Class for calculating DOS
Args:
eigenvalues
"""
def __init__(self,
eigenvalues: NDArray[Shape['Nspin, Nkpts, Nbands'], Number],
weights: NDArray[Shape['Nkpts'], Number] = None,
efermi: float = None,
occupations: NDArray[Shape['Nspin, Nkpts, Nbands'], Number] = None):
self.eigenvalues = eigenvalues
self.occupations = occupations
self.efermi = efermi
if weights is None:
self.weights = np.ones(eigenvalues.shape[1]) / eigenvalues.shape[1]
else:
self.weights = weights
@property
def nspin(self):
return self.eigenvalues.shape[0]
@property
def nkpts(self):
return self.eigenvalues.shape[1]
@property
def nbands(self):
return self.eigenvalues.shape[2]
@staticmethod
def gaussian_smearing(E: NDArray[Shape['Ngrid'], Number],
E0: NDArray[Shape['*, ...'], Number],
sigma: float):
"""
Blur the Delta function by a Gaussian function
Args:
E: Numpy array with the shape (ngrid, ) that represents the energy range for the Gaussian smearing
E0: Numpy array with any shape (i.e. (nspin, nkpts, nbands)) that contains eigenvalues
sigma: the broadening parameter for the Gaussian function
Returns:
Smeared eigenvalues on the grid E, with shape E0.shape + E.shape
"""
return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(
-(np.broadcast_to(E, E0.shape + E.shape) - np.expand_dims(E0, len(E0.shape))) ** 2 / (2 * sigma ** 2))
def __get_by_bands(self,
property: str,
bands: Union[int, Iterable[int]]):
property = getattr(self, property)
if type(bands) is int:
if self.nspin == 1:
return property[:, bands]
elif self.nspin > 1:
return property[:, :, bands]
elif isinstance(bands, Iterable):
if self.nspin == 1:
return property[:, bands].transpose(1, 0)
elif self.nspin > 1:
return property[:, :, bands].transpose(2, 0, 1)
else:
raise ValueError('Variable bands should be int or iterable')
def get_band_eigs(self, bands: Union[int, Iterable]):
return self.__get_by_bands('eigenvalues', bands)
def get_band_occ(self, bands: Union[int, Iterable]):
if self.occupations is not None:
return self.__get_by_bands('occupations', bands)
else:
raise ValueError('Occupations has not been defined')
def get_DOS(self,
dE: float = 0.01,
emin: float = None,
emax: float = None,
zero_at_fermi: bool = False,
smearing: str = 'gaussian',
sigma: float = 0.02) -> tuple[NDArray[Shape['Ngrid'], Number], NDArray[Shape['Nspin, Ngrid'], Number]]:
"""Calculate Density of States based on eigenvalues and its weights
Args:
dE (float, optional): step of energy array in function's output. Default value is 0.01
zero_at_fermi (bool, optional): if True Fermi energy will be equal to zero
emin (float, optional): minimum value in DOS calculation.
emax (float, optional): maximum value in DOS calculation.
smearing (str, optional): defines which smearing scheme will be used. Default value is 'gaussian'.
Possible options: 'gaussian'
sigma (float, optional): define the sigma parameter in Gaussian smearing. Default value is 0.02
Returns:
E, DOS - the energy grid (1D np.array) and the corresponding DOS values (one row per spin channel)
"""
if zero_at_fermi is True and self.efermi is None:
raise ValueError('You can not set zero_at_fermi=True if you did not specify efermi value')
E_min = np.min(self.eigenvalues) - 1 if emin is None else emin
E_max = np.max(self.eigenvalues) + 1 if emax is None else emax
E_arr = np.arange(E_min, E_max, dE)
if smearing.lower() == 'gaussian':
DOS_arr = np.sum(self.weights[None, :, None, None] *
self.gaussian_smearing(E_arr, self.eigenvalues, sigma), axis=(1, 2))
else:
raise NotImplementedError(f'Smearing {smearing} is not implemented. Please use \'gaussian\' instead.')
# 2 means occupancy for non-spinpolarized calculation
if self.nspin == 1:
DOS_arr *= 2
if zero_at_fermi:
return E_arr - self.efermi, DOS_arr
else:
return E_arr, DOS_arr
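
A brief usage sketch for the EBS class above (the synthetic eigenvalues and weights are made up for illustration; import EBS from wherever this module lives in the package):

import numpy as np
# EBS is the class defined above

nspin, nkpts, nbands = 1, 4, 20
rng = np.random.default_rng(0)
eigenvalues = rng.normal(0.0, 2.0, size=(nspin, nkpts, nbands))   # fake band energies, eV
weights = np.full(nkpts, 1.0 / nkpts)                             # equal k-point weights

ebs = EBS(eigenvalues, weights=weights, efermi=0.0)
E, dos = ebs.get_DOS(dE=0.01, sigma=0.05, zero_at_fermi=True)
print(E.shape, dos.shape)   # dos has one row per spin channel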


@@ -0,0 +1,102 @@
from __future__ import annotations
import numpy as np
from nptyping import NDArray, Shape, Number
class IonicDynamics:
def __init__(self,
forces_hist: NDArray[Shape['Nsteps, Natoms, 3'], Number] | None,
coords_hist: NDArray[Shape['Nsteps, Natoms, 3'], Number] | None,
lattice: NDArray[Shape['3, 3'], Number] | None,
coords_are_cartesian: bool | None):
self.forces_hist = forces_hist
self.coords_hist = coords_hist
self.lattice = lattice
self.coords_are_cartesian = coords_are_cartesian
@property
def forces(self):
if self.forces_hist is not None:
return self.forces_hist[-1]
else:
raise ValueError('Forces_hist is None')
def get_forces(self,
mod: str = 'mean',
diff: bool = False):
"""
Args:
mod (str, optional):
norm - (N_steps, N_atoms) returns the norm of forces along the ionic trajectory
mean - (N_steps, ) returns the mean value of forces' norm in simulation cell along the ionic trajectory
max - (N_steps, ) returns the max value of forces' norm in simulation cell along the ionic trajectory
diff (bool, optional): if True returns absolute value of forces differences between i and i+1 steps.
If False returns just forces values at each step
Returns:
"""
if self.forces_hist is not None:
if mod == 'norm':
forces = np.linalg.norm(self.forces_hist, axis=2)
elif mod == 'mean':
forces = np.mean(np.linalg.norm(self.forces_hist, axis=2), axis=1)
elif mod == 'max':
forces = np.max(np.linalg.norm(self.forces_hist, axis=2), axis=1)
else:
raise ValueError(f'mod should be norm/mean/max. You set {mod}')
if diff:
return np.abs(forces[1:] - forces[:-1])
else:
return forces
else:
raise ValueError('Forces_hist is None')
def get_displacements(self,
i: int | None = None,
j: int | None = None,
scalar: bool = True) -> (NDArray[Shape['Natoms, 3']] |
NDArray[Shape['Natoms, 1']] |
NDArray[Shape['Nsteps, Natoms, 3']] |
NDArray[Shape['Nsteps, Natoms, 1']]):
if self.coords_hist is not None and \
self.lattice is not None and \
self.coords_are_cartesian is not None:
if isinstance(i, int) and isinstance(j, int):
if self.coords_are_cartesian:
transform = np.linalg.inv(self.lattice)
r1 = np.matmul(self.coords_hist[i], transform)
r2 = np.matmul(self.coords_hist[j], transform)
else:
r1 = self.coords_hist[i]
r2 = self.coords_hist[j]
R = r2 - r1
R = (R + 0.5) % 1 - 0.5
assert np.all(R >= - 0.5) and np.all(R <= 0.5)
if scalar:
return np.linalg.norm(np.matmul(R, self.lattice), axis=1)
else:
return np.matmul(R, self.lattice)
elif i is None or j is None:
if self.coords_are_cartesian:
transform = np.linalg.inv(self.lattice)
R = np.matmul(self.coords_hist, transform)
else:
R = self.coords_hist
R = np.diff(R, axis=0)
R = (R + 0.5) % 1 - 0.5
if scalar:
return np.linalg.norm(np.matmul(R, self.lattice), axis=2)
else:
return np.matmul(R, self.lattice)
else:
raise ValueError('Method get_displacements can only be called '
'if coords_hist, lattice and coords_are_cartesian are not None')
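
A short usage sketch for IonicDynamics (the random trajectory and the cubic cell below are illustrative, not data from this repository):

import numpy as np

nsteps, natoms = 10, 4
rng = np.random.default_rng(1)
lattice = 10.0 * np.eye(3)                # cubic 10 Angstrom box
coords = rng.random((nsteps, natoms, 3))  # fractional coordinates along the trajectory
forces = rng.normal(size=(nsteps, natoms, 3))

dyn = IonicDynamics(forces_hist=forces, coords_hist=coords,
                    lattice=lattice, coords_are_cartesian=False)
print(dyn.get_forces(mod='max'))      # max |F| per ionic step
print(dyn.get_displacements(0, -1))   # per-atom displacement between first and last step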


@@ -0,0 +1,321 @@
from __future__ import annotations
import numpy as np
from typing import Iterable
from termcolor import colored
from nptyping import NDArray, Shape, Number
class Structure:
"""
Basic class for structure of unit/supercell.
Args:
lattice: 2D array that contains lattice vectors. Each row should correspond to a lattice vector.
E.g., [[5, 5, 0], [7, 4, 0], [0, 0, 25]].
species: List of species on each site. Usually list of elements, e.g., ['Al', 'Al', 'O', 'H'].
coords: List of lists or np.ndarray (Nx3 dimension) that contains coords of each species.
coords_are_cartesian: True if coords are cartesian, False if coords are fractional
"""
def __init__(self,
lattice: NDArray[Shape[3, 3], Number] | list[list[float]],
species: list[str],
coords: NDArray[Shape['Natoms, 3'], Number] | list[list[float]],
coords_are_cartesian: bool = True):
if len(species) != len(coords):
raise StructureError('Number of species and number of coords must be the same')
self.species = species
if isinstance(lattice, np.ndarray):
self.lattice = lattice
else:
self.lattice = np.array(lattice)
if isinstance(coords, np.ndarray):
self.coords = coords
else:
self.coords = np.array(coords)
self.coords_are_cartesian = coords_are_cartesian
def __repr__(self) -> str:
lines = ['\nLattice:']
width = len(str(int(np.max(self.lattice)))) + 6
for axis in self.lattice:
lines.append(' '.join([f'{axis_coord:{width}.5f}' for axis_coord in axis]))
width = len(str(int(np.max(self.coords)))) + 6
lines.append('\nSpecies:')
unique, counts = np.unique(self.species, return_counts=True)
if len(self.species) < 10:
lines.append(' '.join([s for s in self.species]))
for u, c in zip(unique, counts):
lines.append(f'{u}:\t{c}')
lines.append(f'\nCoords are cartesian: {self.coords_are_cartesian}')
lines.append('\nCoords:')
for coord in self.coords:
lines.append(' '.join([f'{c:{width}.5f}' for c in coord]))
else:
part_1 = ' '.join([s for s in self.species[:5]])
part_2 = ' '.join([s for s in self.species[-5:]])
lines.append(part_1 + ' ... ' + part_2)
for u, c in zip(unique, counts):
lines.append(f'{u}:\t{c}')
lines.append(f'\nCoords are cartesian: {self.coords_are_cartesian}')
lines.append('\nCoords:')
for coord in self.coords[:5]:
lines.append(' '.join([f'{c:{width}.5f}' for c in coord]))
lines.append('...')
for coord in self.coords[-5:]:
lines.append(' '.join([f'{c:{width}.5f}' for c in coord]))
return '\n'.join(lines)
def __eq__(self, other: Structure) -> bool:
assert isinstance(other, Structure), 'Other object must be a Structure instance'
if not self.coords_are_cartesian == other.coords_are_cartesian:
if self.coords_are_cartesian:
other.mod_coords_to_cartesian()
print(colored('Coords of other were modified into Cartesian', color='green'))
else:
other.mod_coords_to_direct()
print(colored('Coords of other were modified into Direct', color='green'))
return np.allclose(self.lattice, other.lattice, atol=1e-10, rtol=1e-10) and \
(self.species == other.species) and \
np.allclose(self.coords, other.coords, atol=1e-10, rtol=1e-10)
@property
def natoms(self) -> int:
return len(self.species)
@property
def natoms_by_type(self) -> dict[str: int]:
unique, counts = np.unique(self.species, return_counts=True)
return {i: j for i, j in zip(unique, counts)}
def mod_add_atoms(self, coords, species) -> None:
"""
Adds atoms in the Structure
Args:
coords: List or np.ndarray (Nx3 dimension) that contains coords of each species
species: List of species on each site. Usually list of elements, e.g., ['Al', 'Al', 'O', 'H']
"""
if not isinstance(coords, np.ndarray):
coords = np.array(coords)
self.coords = np.vstack((self.coords, coords))
if isinstance(species, str):
self.species += [species]
else:
self.species += species
def mod_delete_atoms(self,
ids: int | list[int]) -> None:
"""
Deletes selected atoms by ids
Args:
ids: sequence of atoms ids
"""
self.coords = np.delete(self.coords, ids, axis=0)
self.species = np.delete(self.species, ids)
def mod_change_atoms(self,
ids: int | list[int],
coords: NDArray[Shape['Nids, 3'], Number] | None,
species: str | list[str] | None) -> None:
"""
Change selected atom by id
Args:
ids: List or int. First id is zero.
coords: None or np.array with new coords, e.g. np.array([1, 2, 4.6])
species: None or str or List[str]. New types of changed atoms
Returns:
"""
if coords is not None:
self.coords[ids] = coords
if species is not None:
if isinstance(ids, Iterable):
for i, sp in zip(ids, species):
self.species[i] = sp
else:
self.species[ids] = species
def mod_coords_to_cartesian(self) -> None | str:
"""
Converts species coordinates to Cartesian coordination system.
"""
if self.coords_are_cartesian is True:
return 'Coords are already cartesian'
else:
self.coords = np.matmul(self.coords, self.lattice)
self.coords_are_cartesian = True
def mod_coords_to_direct(self) -> None | str:
"""
Converts species coordinates to Direct coordination system.
"""
if self.coords_are_cartesian is False:
return 'Coords are already direct'
else:
transform = np.linalg.inv(self.lattice)
self.coords = np.matmul(self.coords, transform)
self.coords_are_cartesian = False
def mod_add_vector(self,
vector: NDArray[Shape['3'], Number],
cartesian: bool = True) -> None:
"""
Adds a vector to all atoms.
Args:
vector (np.ndarray): a vector which will be added to coordinates of all species
cartesian (bool): determine in which coordination system the vector defined. Cartesian=True means that
the vector is defined in Cartesian coordination system. Cartesian=False means that the vector is defined
in Direct coordination system.
"""
self.mod_coords_to_direct()
if cartesian:
transform = np.linalg.inv(self.lattice)
vector = np.matmul(vector, transform)
self.coords += vector
self.coords %= np.array([1, 1, 1])
def mod_propagate_unit_cell(self, a, b, c) -> None:
"""Extend the unit cell by propagation on lattice vector direction. The resulted unit cell will be the size
of [a * lattice[0], b * lattice[1], c * lattice[2]]. All atoms will be replicated with accordance of new
unit cell size
Args:
a (int): the number of replicas in the directions of the first unit cell vector
b (int): the number of replicas in the directions of the second unit cell vector
c (int): the number of replicas in the directions of the third unit cell vector.
"""
if not (isinstance(a, int) and isinstance(b, int) and isinstance(c, int)):
raise ValueError(f'a, b and c must be integers. But a, b, c have types {type(a), type(b), type(c)}')
if a * b * c > 1:
self.mod_coords_to_cartesian()
ref_coords = self.coords.copy()
ref_species = self.species.copy()
for i in range(a):
for j in range(b):
for k in range(c):
if i != 0 or j != 0 or k != 0:
vector = np.sum(np.array([i, j, k]).reshape(-1, 1) * self.lattice, axis=0)
self.coords = np.vstack((self.coords, ref_coords + vector))
self.species += ref_species
self.lattice = np.array([a, b, c]).reshape(-1, 1) * self.lattice
else:
raise ValueError(f'a, b and c must be positive. But {a=} {b=} {c=}')
def get_vector(self,
id_1: int,
id_2: int,
unit: bool = True) -> NDArray[Shape['3'], Number]:
"""
Returns a vector (unit vector by default) which starts in the atom with id_1 and points to the atom with id_2
Args:
id_1: id of the first atom (vector origin)
id_2: id of the second atom
unit: Defines whether the vector will be normed or not. Unit=True means that the vector norm will be equal 1
Returns (np.ndarray): vector from atom with id_1 to atom with id_2
"""
vector = self.coords[id_2] - self.coords[id_1]
if unit:
return vector / np.linalg.norm(vector)
else:
return vector
def get_distance_matrix(self) -> NDArray[Shape['Natoms, Natoms, 3'], Number]:
"""
Returns distance matrix R, where R[i,j] is the vector from atom i to atom j.
Returns:
np.ndarray (NxNx3 dimensions) which is a distance matrix in Cartesian coordination system
"""
self.mod_coords_to_direct()
r1 = np.broadcast_to(self.coords.reshape((self.natoms, 1, 3)), (self.natoms, self.natoms, 3))
r2 = np.broadcast_to(self.coords.reshape((1, self.natoms, 3)), (self.natoms, self.natoms, 3))
R = r2 - r1
R = (R + 0.5) % 1 - 0.5
assert np.all(R >= - 0.5) and np.all(R <= 0.5)
return np.matmul(R, self.lattice)
def get_distance_matrix_scalar(self) -> NDArray[Shape['Natoms, Natoms'], Number]:
"""
Returns distance matrix R, where R[i, j] is the Euclidean norm of a vector from atom i to atom j.
Returns:
np.ndarray (NxN dimensions) which is a distance matrix containing scalars
"""
R = self.get_distance_matrix()
return np.sqrt(np.sum(R * R, axis=2))
def get_filtered_ids(self, **kwargs):
"""
Returns np.ndarray that contains atom ids according to filter rules
Args:
species (str): Define which atom type will be selected. E.g. 'C H N' means select all C, H, and N atoms.
'!C' means select all atoms except C.
Returns:
np.ndarray contains ids of atoms according to selecting rules
"""
filter_mask = np.array([True for _ in range(self.natoms)], dtype=np.bool_)
if 'species' in kwargs:
species = kwargs['species'].split()
species_select = []
species_not_select = []
for specie in species:
if '!' in specie:
species_not_select.append(specie.replace('!', ''))
else:
species_select.append(specie)
if len(species_select):
fm_local = np.array([False for _ in range(self.natoms)], dtype=np.bool_)
for specie in species_select:
fm_local += np.array([True if atom_name == specie else False for atom_name in self.species])
filter_mask *= fm_local
if len(species_not_select):
fm_local = np.array([True for _ in range(self.natoms)], dtype=np.bool_)
for specie in species_not_select:
fm_local *= np.array([False if atom_name == specie else True for atom_name in self.species])
filter_mask *= fm_local
if 'x' in kwargs:
left, right = kwargs['x']
fm_local = np.array([False for _ in range(self.natoms)], dtype=np.bool_)
for i, atom_coord in enumerate(self.coords):
if left < atom_coord[0] < right:
fm_local[i] = True
filter_mask *= fm_local
if 'y' in kwargs:
left, right = kwargs['y']
fm_local = np.array([False for _ in range(self.natoms)], dtype=np.bool_)
for i, atom_coord in enumerate(self.coords):
if left < atom_coord[1] < right:
fm_local[i] = True
filter_mask *= fm_local
if 'z' in kwargs:
left, right = kwargs['z']
fm_local = np.array([False for _ in range(self.natoms)], dtype=np.bool_)
for i, atom_coord in enumerate(self.coords):
if left < atom_coord[2] < right:
fm_local[i] = True
filter_mask *= fm_local
return np.array(range(self.natoms))[filter_mask]
class StructureError(Exception):
"""
Exception class for Structure.
Raised when the structure has problems, e.g., atoms that are too close.
"""
pass
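
A brief usage sketch for the Structure class (a toy two-atom cubic cell; the numbers are not from this repository):

import numpy as np

lattice = [[3.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 3.0]]
s = Structure(lattice,
              species=['Na', 'Cl'],
              coords=[[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]],
              coords_are_cartesian=False)
s.mod_propagate_unit_cell(2, 2, 2)     # 2x2x2 supercell with 16 atoms
d = s.get_distance_matrix_scalar()     # pairwise minimum-image distances, shape (16, 16)
print(s.natoms_by_type, d.shape)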


@@ -0,0 +1,106 @@
from __future__ import annotations
import numpy as np
from nptyping import NDArray, Shape, Number
import warnings
class ThermalProperties:
"""
Class for calculation thermal properties based on the calculated phonon spectra
Args:
eigen_freq (np.ndarray [nkpts, nfreq]): The energies of phonon eigenfrequencies in eV
weights (np.ndarray [nkpts, ], optional): weights of k-points. The sum of weights should be equal to 1.
If weights are not provided, all k-points will be considered with equal weights (1 / nkpts).
"""
k_B_J = 1.380649e-23 # J/K
k_B_eV = 8.617333262e-5 # eV/K
hbar_J = 1.054571817e-34 # J*s
def __init__(self,
eigen_freq: NDArray[Shape['Nkpts, Nfreq'], Number],
weights: NDArray[Shape['Nkpts'], Number] = None):
if weights is None:
self.weights = np.ones(eigen_freq.shape[0]) / eigen_freq.shape[0]
else:
self.weights = weights
if np.sum(eigen_freq < 0) > 0:
warnings.warn('\nThere is at least one imaginary frequency in given eigenfrequencies. '
'\nAll imaginary frequencies will be dropped from any further calculations'
f'\nImaginary frequencies: {eigen_freq[eigen_freq < 0]}')
self.eigen_freq = np.maximum(0, eigen_freq)
def get_Gibbs_ZPE(self) -> float:
r"""
Calculate Zero Point Energy
.. math::
E_{ZPE} = \sum_k weight(k) \sum_i \frac{hw_{i}(k)}{2}
Returns:
ZPE in eV
"""
return np.sum(self.weights[:, None] * self.eigen_freq) / 2
def get_enthalpy_vib(self, T) -> float:
r"""
Calculate the thermal term in vibrational energy
.. math::
E_{temp}(k) = \sum_i \left( \frac{hw_{i}(k)}{( \exp (hw_{i}(k) / k_B T) - 1)} \right) \\\\
E_{temp} = \sum_k weight(k) \cdot E_{temp}(k)
Args:
T: Temperature in K
Returns:
Thermal vibrational energy in eV
"""
k_B = 8.617333262145e-5 # Boltzmann's constant in eV/K
return np.sum(np.nan_to_num(self.weights[:, None] * self.eigen_freq / (np.exp(self.eigen_freq / (k_B * T)) - 1)))
def get_TS_vib(self, T) -> float:
r"""
Calculate the vibrational entropy contribution
.. math::
S_{vib}(k) = \sum_i \left( \frac{hw_{i}(k)}{( \exp (hw_{i}(k) / k_B T) - 1)} -
k_B ln \left( 1 - \exp \left(- \frac{hw_i(k)}{k_B T} \right) \right) \right) \\\\
T * S_{vib} = T \sum_k (weight_k S_{vib}(k))
Args:
T: Temperature in K
Returns:
TS in eV
"""
k_B = 8.617333262145e-5 # Boltzmann's constant in eV/K
second_term = - np.sum(self.weights[:, None] * k_B * T * np.nan_to_num(np.log(1 - np.exp(- self.eigen_freq / (k_B * T))),
neginf=0))
return self.get_enthalpy_vib(T) + second_term
def get_Gibbs_vib(self, T: float) -> float:
return self.get_Gibbs_ZPE() + self.get_enthalpy_vib(T) - self.get_TS_vib(T)
@classmethod
def get_Gibbs_trans(cls,
V: float,
mass: float,
T: float):
return - cls.k_B_eV * T * np.log(V * (mass * cls.k_B_J * T / (2 * np.pi * cls.hbar_J**2))**1.5)
@classmethod
def get_Gibbs_rot(cls,
I: float | list[float] | NDArray[Shape['3'], Number],
sigma: int,
T: float):
if type(I) is float or len(I) == 1:
return - cls.k_B_eV * T * np.log(2 * I * cls.k_B_J * T / (sigma * cls.hbar_J ** 2))
elif len(I) == 3:
return - cls.k_B_eV * T * np.log((2 * cls.k_B_J * T)**1.5 * (np.pi * I[0] * I[1] * I[2])**0.5 /
(sigma * cls.hbar_J**3))
else:
raise ValueError(f'I should be either float or array with length of 3, however {len(I)=}')
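
A usage sketch for ThermalProperties (the two phonon energies below are invented for illustration):

import numpy as np

# one (gamma-only) k-point with two phonon modes at 50 meV and 100 meV
eigen_freq = np.array([[0.050, 0.100]])   # eV
tp = ThermalProperties(eigen_freq)

print(tp.get_Gibbs_ZPE())        # (0.050 + 0.100) / 2 = 0.075 eV
print(tp.get_Gibbs_vib(300.0))   # ZPE + thermal enthalpy - T*S_vib at 300 K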


@@ -0,0 +1,24 @@
import numpy as np
import subprocess
class ClassMethods:
def check_existence(self, variable):
"""
This function checks whether the desired variable is not None
:param variable: desired variable
:return: nothing
"""
if getattr(self, variable) is None:
raise ValueError(f'{variable} is not defined')
def nearest_array_index(array, value):
return (np.abs(array - value)).argmin()
def shell(cmd) -> str:
'''
Run shell command and return output as a string
'''
return subprocess.check_output(cmd, shell=True).decode()


@@ -0,0 +1,56 @@
#!/usr/bin/env python
'''
Physical constants used in VASP
This file was downloaded from https://github.com/QijingZheng/VaspBandUnfolding 22.05.2019
'''
# Some important Parameters, to convert to a.u.
# - AUTOA = 1. a.u. in Angstroem
# - RYTOEV = 1 Ry in Ev
# - EVTOJ = 1 eV in Joule
# - AMTOKG = 1 atomic mass unit ("proton mass") in kg
# - BOLKEV = Boltzmanns constant in eV/K
# - BOLK = Boltzmanns constant in Joule/K
AUTOA = 0.529177249
RYTOEV = 13.605826
CLIGHT = 137.037 # speed of light in a.u.
EVTOJ = 1.60217733E-19
AMTOKG = 1.6605402E-27
BOLKEV = 8.6173857E-5
BOLK = BOLKEV * EVTOJ
EVTOKCAL = 23.06
# FELECT = (the electronic charge)/(4*pi*the permittivity of free space)
# in atomic units this is just e^2
# EDEPS = electron charge divided by the permittivity of free space
# in atomic units this is just 4 pi e^2
# HSQDTM = (plancks CONSTANT/(2*PI))**2/(2*ELECTRON MASS)
#
PI = 3.141592653589793238
TPI = 2 * PI
CITPI = 1j * TPI
FELECT = 2 * AUTOA * RYTOEV
EDEPS = 4 * PI * 2 * RYTOEV * AUTOA
HSQDTM = RYTOEV * AUTOA * AUTOA
# vector field A times momentum times e/ (2 m_e c) is an energy
# magnetic moments are supplied in Bohr magnetons
# e / (2 m_e c) A(r) p(r) = energy
# e / (2 m_e c) m_s x ( r - r_s) / (r-r_s)^3 hbar nabla =
# e^2 hbar^2 / (2 m_e^2 c^2) 1/ lenght^3 = energy
# conversion factor from magnetic moment to energy
# checked independently in SI by Gilles de Wijs
MAGMOMTOENERGY = 1 / CLIGHT**2 * AUTOA**3 * RYTOEV
# dimensionless number connecting input and output magnetic moments
# AUTOA e^2 (2 m_e c^2)
MOMTOMOM = AUTOA / CLIGHT / CLIGHT / 2
AUTOA2 = AUTOA * AUTOA
AUTOA3 = AUTOA2 * AUTOA
AUTOA4 = AUTOA2 * AUTOA2
AUTOA5 = AUTOA3 * AUTOA2
# dipole moment in atomic units to Debye
AUTDEBYE = 2.541746


@@ -0,0 +1,948 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This program was downloaded from https://github.com/QijingZheng/VaspBandUnfolding 22.05.2019
We changed print calls (print x to print(x)) to run this program with python3
In lines: 274 276 278 325 429 431 444 468 469 480 481 711 713 715 /2 changed to //2
"""
import os
import numpy as np
from math import sqrt
from .vasp_constant import *
from multiprocessing import cpu_count
from scipy.fftpack import fftfreq, fftn, ifftn
############################################################
def save2vesta(phi=None, poscar='POSCAR', prefix='wfc',
lgam=False, lreal=False, ncol=10):
'''
Save the real space pseudo-wavefunction as vesta format.
'''
nx, ny, nz = phi.shape
try:
pos = open(poscar, 'r')
head = ''
for line in pos:
if line.strip():
head += line
else:
break
head += '\n%5d%5d%5d\n' % (nx, ny, nz)
except:
raise IOError('Failed to open %s' % poscar)
# Faster IO
nrow = phi.size // ncol
nrem = phi.size % ncol
fmt = "%16.8E"
psi = phi.copy()
psi = psi.flatten(order='F')
psi_h = psi[:nrow * ncol].reshape((nrow, ncol))
psi_r = psi[nrow * ncol:]
with open(prefix + '_r.vasp', 'w') as out:
out.write(head)
out.write(
'\n'.join([''.join([fmt % xx for xx in row])
for row in psi_h.real])
)
out.write("\n" + ''.join([fmt % xx for xx in psi_r.real]))
if not (lgam or lreal):
with open(prefix + '_i.vasp', 'w') as out:
out.write(head)
out.write(
'\n'.join([''.join([fmt % xx for xx in row])
for row in psi_h.imag])
)
out.write("\n" + ''.join([fmt % xx for xx in psi_r.imag]))
############################################################
class vaspwfc(object):
'''
Class for processing VASP Pseudowavefunction stored in WAVECAR. This
program is motivated by PIESTA written by Ren Hao <renh@upc.edu.cn>.
The format of VASP WAVECAR, as shown in
http://www.andrew.cmu.edu/user/feenstra/wavetrans/
is:
Record-length #spin components RTAG(a value specifying the precision)
#k-points #bands ENCUT(maximum energy for plane waves)
LatVec-A
LatVec-B
LatVec-C
Loop over spin
Loop over k-points
#plane waves, k vector
Loop over bands
band energy, band occupation
End loop over bands
Loop over bands
Loop over plane waves
Plane-wave coefficient
End loop over plane waves
End loop over bands
End loop over k-points
End loop over spin
'''
def __init__(self, fnm='WAVECAR', lsorbit=False, lgamma=False,
gamma_half='z', omp_num_threads=1):
'''
Initialization.
'''
self._fname = fnm
# the directory containing the input file
self._dname = os.path.dirname(fnm)
if self._dname == '':
self._dname = '.'
self._lsoc = lsorbit
self._lgam = lgamma
self._gam_half = gamma_half.lower()
# It seems that some modules in scipy use OpenMP; it is therefore
# desirable to set OMP_NUM_THREADS to tune the parallelization.
os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)
assert not (lsorbit and lgamma), 'The two settings conflict!'
assert self._gam_half == 'x' or self._gam_half == 'z', \
'Gamma_half must be "x" or "z"'
try:
self._wfc = open(self._fname, 'rb')
except:
raise IOError('Failed to open %s' % self._fname)
# read the basic information
self.readWFHeader()
# read the band information
self.readWFBand()
if self._lsoc:
assert self._nspin == 1, "NSPIN = 1 for noncollinear version WAVECAR!"
def set_omp_num_threads(self, nproc):
'''
Set the OMP_NUM_THREADS environment variable
'''
assert 1 <= nproc <= cpu_count()
os.environ['OMP_NUM_THREADS'] = str(nproc)
def isSocWfc(self):
"""
Is the WAVECAR from an SOC calculation?
"""
return True if self._lsoc else False
def isGammaWfc(self):
"""
Is the WAVECAR from a gamma-only calculation?
"""
return True if self._lgam else False
def readWFHeader(self):
'''
Read the system information from WAVECAR, which is written in the first
two record.
rec1: recl, nspin, rtag
rec2: nkpts, nbands, encut, ((cell(i,j) i=1, 3), j=1, 3)
'''
# goto the start of the file and read the first record
self._wfc.seek(0)
self._recl, self._nspin, self._rtag = np.array(
np.fromfile(self._wfc, dtype=float, count=3),
dtype=int
)
self._WFPrec = self.setWFPrec()
# the second record
self._wfc.seek(self._recl)
dump = np.fromfile(self._wfc, dtype=float, count=12)
self._nkpts = int(dump[0]) # No. of k-points
self._nbands = int(dump[1]) # No. of bands
self._encut = dump[2] # Energy cutoff
self._Acell = dump[3:].reshape((3,3)) # real space supercell basis
self._Omega = np.linalg.det(self._Acell) # real space supercell volume
self._Bcell = np.linalg.inv(self._Acell).T # reciprocal space lattice vectors (without the 2*pi factor)
# Minimum FFT grid size
Anorm = np.linalg.norm(self._Acell, axis=1)
CUTOF = np.ceil(
sqrt(self._encut / RYTOEV) / (TPI / (Anorm / AUTOA))
)
self._ngrid = np.array(2 * CUTOF + 1, dtype=int)
def setWFPrec(self):
'''
Set wavefunction coefficients precision:
TAG = 45200: single precision complex, np.complex64, or complex(qs)
TAG = 45210: double precision complex, np.complex128, or complex(q)
'''
if self._rtag == 45200:
return np.complex64
elif self._rtag == 45210:
return np.complex128
elif self._rtag == 53300:
raise ValueError("VASP5 WAVECAR format, not implemented yet")
elif self._rtag == 53310:
raise ValueError("VASP5 WAVECAR format with double precision "
+"coefficients, not implemented yet")
else:
raise ValueError("Invalid TAG values: {}".format(self._rtag))
def readWFBand(self):
'''
Extract KS energies and Fermi occupations from WAVECAR.
'''
self._nplws = np.zeros(self._nkpts, dtype=int)
self._kvecs = np.zeros((self._nkpts, 3), dtype=float)
self._bands = np.zeros((self._nspin, self._nkpts, self._nbands), dtype=float)
self._occs = np.zeros((self._nspin, self._nkpts, self._nbands), dtype=float)
for ii in range(self._nspin):
for jj in range(self._nkpts):
rec = self.whereRec(ii+1, jj+1, 1) - 1
self._wfc.seek(rec * self._recl)
dump = np.fromfile(self._wfc, dtype=float, count=4+3*self._nbands)
if ii == 0:
self._nplws[jj] = int(dump[0])
self._kvecs[jj] = dump[1:4]
dump = dump[4:].reshape((-1, 3))
self._bands[ii,jj,:] = dump[:,0]
self._occs[ii,jj,:] = dump[:,2]
if self._nkpts > 1:
tmp = np.linalg.norm(
np.dot(np.diff(self._kvecs, axis=0), self._Bcell), axis=1)
self._kpath = np.concatenate(([0,], np.cumsum(tmp)))
else:
self._kpath = None
return self._kpath, self._bands
def get_kpath(self, nkseg=None):
'''
Construct k-point path, find out the k-path boundary if possible.
nkseg is the number of k-points in each k-path segments.
'''
if nkseg is None:
if os.path.isfile(self._dname + "/KPOINTS"):
kfile = open(self._dname + "/KPOINTS").readlines()
if kfile[2][0].upper() == 'L':
nkseg = int(kfile[1].split()[0])
else:
raise ValueError('Error reading number of k-points from KPOINTS')
assert nkseg > 0
nsec = self._nkpts // nkseg
v = self._kvecs.copy()
for ii in range(nsec):
ki = ii * nkseg
kj = (ii + 1) * nkseg
v[ki:kj,:] -= v[ki]
self._kpath = np.linalg.norm(np.dot(v, self._Bcell), axis=1)
for ii in range(1, nsec):
ki = ii * nkseg
kj = (ii + 1) * nkseg
self._kpath[ki:kj] += self._kpath[ki - 1]
self._kbound = np.concatenate((self._kpath[0::nkseg], [self._kpath[-1],]))
return self._kpath, self._kbound
def gvectors(self, ikpt=1, force_Gamma=False, check_consistency=True):
'''
Generate the G-vectors that satisfies the following relation
(G + k)**2 / 2 < ENCUT
'''
assert 1 <= ikpt <= self._nkpts, 'Invalid kpoint index!'
kvec = self._kvecs[ikpt-1]
# fx, fy, fz = [fftfreq(n) * n for n in self._ngrid]
# fftfreq in scipy.fftpack is a little different with VASP frequencies
fx = [ii if ii < self._ngrid[0] // 2 + 1 else ii - self._ngrid[0]
for ii in range(self._ngrid[0])]
fy = [jj if jj < self._ngrid[1] // 2 + 1 else jj - self._ngrid[1]
for jj in range(self._ngrid[1])]
fz = [kk if kk < self._ngrid[2] // 2 + 1 else kk - self._ngrid[2]
for kk in range(self._ngrid[2])]
# force_Gamma: consider gamma-only case regardless of the real setting
lgam = True if force_Gamma else self._lgam
if lgam:
# parallel gamma version of VASP WAVECAR exclude some planewave
# components, -DwNGZHalf
if self._gam_half == 'z':
kgrid = np.array([(fx[ii], fy[jj], fz[kk])
for kk in range(self._ngrid[2])
for jj in range(self._ngrid[1])
for ii in range(self._ngrid[0])
if (
(fz[kk] > 0) or
(fz[kk] == 0 and fy[jj] > 0) or
(fz[kk] == 0 and fy[jj] == 0 and fx[ii] >= 0)
)], dtype=float)
else:
kgrid = np.array([(fx[ii], fy[jj], fz[kk])
for kk in range(self._ngrid[2])
for jj in range(self._ngrid[1])
for ii in range(self._ngrid[0])
if (
(fx[ii] > 0) or
(fx[ii] == 0 and fy[jj] > 0) or
(fx[ii] == 0 and fy[jj] == 0 and fz[kk] >= 0)
)], dtype=float)
else:
kgrid = np.array([(fx[ii], fy[jj], fz[kk])
for kk in range(self._ngrid[2])
for jj in range(self._ngrid[1])
for ii in range(self._ngrid[0])], dtype=float)
# Kinetic_Energy = (G + k)**2 / 2
# HSQDTM = hbar**2/(2*ELECTRON MASS)
KENERGY = HSQDTM * np.linalg.norm(
np.dot(kgrid + kvec[np.newaxis,:] , TPI*self._Bcell), axis=1
)**2
# find Gvectors where (G + k)**2 / 2 < ENCUT
Gvec = kgrid[np.where(KENERGY < self._encut)[0]]
# Check if the calculated number of planewaves and the one recorded in the
# WAVECAR are equal
if check_consistency:
if self._lsoc:
assert Gvec.shape[0] == self._nplws[ikpt - 1] // 2, \
'No. of planewaves not consistent for an SOC WAVECAR! %d %d %d' % \
(Gvec.shape[0], self._nplws[ikpt -1], np.prod(self._ngrid))
else:
assert Gvec.shape[0] == self._nplws[ikpt - 1], 'No. of planewaves not consistent! %d %d %d' % \
(Gvec.shape[0], self._nplws[ikpt -1], np.prod(self._ngrid))
return np.asarray(Gvec, dtype=int)
def save2vesta(self, phi=None, lreal=False, poscar='POSCAR', prefix='wfc',
ncol=10):
'''
Save the real space pseudo-wavefunction as vesta format.
'''
nx, ny, nz = phi.shape
try:
pos = open(poscar, 'r')
head = ''
for line in pos:
if line.strip():
head += line
else:
break
head += '\n%5d%5d%5d\n' % (nx, ny, nz)
except:
raise IOError('Failed to open %s' % poscar)
# Faster IO
nrow = phi.size // ncol
nrem = phi.size % ncol
fmt = "%16.8E"
psi = phi.copy()
psi = psi.flatten(order='F')
psi_h = psi[:nrow * ncol].reshape((nrow, ncol))
psi_r = psi[nrow * ncol:]
with open(prefix + '_r.vasp', 'w') as out:
out.write(head)
out.write(
'\n'.join([''.join([fmt % xx for xx in row])
for row in psi_h.real])
)
out.write("\n" + ''.join([fmt % xx for xx in psi_r.real]))
if not (self._lgam or lreal):
with open(prefix + '_i.vasp', 'w') as out:
out.write(head)
out.write(
'\n'.join([''.join([fmt % xx for xx in row])
for row in psi_h.imag])
)
out.write("\n" + ''.join([fmt % xx for xx in psi_r.imag]))
def wfc_r(self, ispin=1, ikpt=1, iband=1,
gvec=None, Cg=None, ngrid=None,
rescale=None,
norm=True):
'''
Obtain the pseudo-wavefunction of the specified KS states in real space
by performing FT transform on the reciprocal space planewave
coefficients. The 3D FT grid size is determined by ngrid, which
defaults to self._ngrid if not given. Gvectors of the KS states is used
to put 1D planewave coefficients back to 3D grid.
Inputs:
ispin : spin index of the desired KS states, starting from 1
ikpt : k-point index of the desired KS states, starting from 1
iband : band index of the desired KS states, starting from 1
gvec : the G-vectors correspond to the plane-wave coefficients
Cg : the plane-wave coefficients. If None, read from WAVECAR
ngrid : the FFT grid size
norm : normalized Cg?
The return wavefunctions are normalized in a way that
\sum_{ijk} | \phi_{ijk} | ^ 2 = 1
'''
self.checkIndex(ispin, ikpt, iband)
if ngrid is None:
ngrid = self._ngrid.copy() * 2
else:
ngrid = np.array(ngrid, dtype=int)
assert ngrid.shape == (3,)
assert np.all(ngrid >= self._ngrid), \
"Minimum FT grid size: (%d, %d, %d)" % \
(self._ngrid[0], self._ngrid[1], self._ngrid[2])
# The default normalization of np.fft.fftn has the direct transforms
# unscaled and the inverse transforms are scaled by 1/n. It is possible
# to obtain unitary transforms by setting the keyword argument norm to
# "ortho" (default is None) so that both direct and inverse transforms
# will be scaled by 1/\sqrt{n}.
# default normalization factor so that
# \sum_{ijk} | \phi_{ijk} | ^ 2 = 1
normFac = rescale if rescale is not None else np.sqrt(np.prod(ngrid))
if gvec is None:
gvec = self.gvectors(ikpt)
if self._lgam:
if self._gam_half == 'z':
phi_k = np.zeros((ngrid[0], ngrid[1], ngrid[2]//2 + 1), dtype=np.complex128)
else:
phi_k = np.zeros((ngrid[0]//2 + 1, ngrid[1], ngrid[2]), dtype=np.complex128)
else:
phi_k = np.zeros(ngrid, dtype=np.complex128)
gvec %= ngrid[np.newaxis,:]
if self._lsoc:
wfc_spinor = []
if Cg:
dump = Cg
else:
dump = self.readBandCoeff(ispin, ikpt, iband, norm)
nplw = dump.shape[0] // 2
# spinor up
phi_k[gvec[:,0], gvec[:,1], gvec[:,2]] = dump[:nplw]
wfc_spinor.append(ifftn(phi_k) * normFac)
# spinor down
phi_k[:,:,:] = 0.0j
phi_k[gvec[:,0], gvec[:,1], gvec[:,2]] = dump[nplw:]
wfc_spinor.append(ifftn(phi_k) * normFac)
del dump
return wfc_spinor
else:
if Cg is not None:
phi_k[gvec[:,0], gvec[:,1], gvec[:,2]] = Cg
else:
phi_k[gvec[:,0], gvec[:,1], gvec[:,2]] = self.readBandCoeff(ispin, ikpt, iband, norm)
if self._lgam:
# add some components that are excluded and perform c2r FFT
if self._gam_half == 'z':
for ii in range(ngrid[0]):
for jj in range(ngrid[1]):
fx = ii if ii < ngrid[0] // 2 + 1 else ii - ngrid[0]
fy = jj if jj < ngrid[1] // 2 + 1 else jj - ngrid[1]
if (fy > 0) or (fy == 0 and fx >= 0):
continue
phi_k[ii,jj,0] = phi_k[-ii,-jj,0].conjugate()
phi_k /= np.sqrt(2.)
phi_k[0,0,0] *= np.sqrt(2.)
return np.fft.irfftn(phi_k, s=ngrid) * normFac
elif self._gam_half == 'x':
for jj in range(ngrid[1]):
for kk in range(ngrid[2]):
fy = jj if jj < ngrid[1] // 2 + 1 else jj - ngrid[1]
fz = kk if kk < ngrid[2] // 2 + 1 else kk - ngrid[2]
if (fy > 0) or (fy == 0 and fz >= 0):
continue
phi_k[0,jj,kk] = phi_k[0,-jj,-kk].conjugate()
phi_k /= np.sqrt(2.)
phi_k[0,0,0] *= np.sqrt(2.)
phi_k = np.swapaxes(phi_k, 0, 2)
tmp = np.fft.irfftn(phi_k, s=(ngrid[2], ngrid[1], ngrid[0])) * normFac
return np.swapaxes(tmp, 0, 2)
else:
# perform complex2complex FFT
return ifftn(phi_k * normFac)
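    # A minimal, hedged usage sketch for wfc_r (the WAVECAR path and the state
    # indices below are placeholders, not values from any particular calculation):
    #
    #   wav = vaspwfc('WAVECAR')
    #   phi = wav.wfc_r(ispin=1, ikpt=1, iband=10)   # complex field on the doubled FFT grid
    #   rho = np.abs(phi)**2                         # |phi|^2 sums to 1 over the grid points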
def readBandCoeff(self, ispin=1, ikpt=1, iband=1, norm=False):
'''
Read the planewave coefficients of specified KS states.
'''
self.checkIndex(ispin, ikpt, iband)
rec = self.whereRec(ispin, ikpt, iband)
self._wfc.seek(rec * self._recl)
nplw = self._nplws[ikpt - 1]
dump = np.fromfile(self._wfc, dtype=self._WFPrec, count=nplw)
cg = np.asarray(dump, dtype=np.complex128)
if norm:
cg /= np.linalg.norm(cg)
return cg
def whereRec(self, ispin=1, ikpt=1, iband=1):
'''
Return the rec position for specified KS state.
'''
self.checkIndex(ispin, ikpt, iband)
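        # WAVECAR record layout: 2 header records, then for each spin one block of
        # nkpts * (nbands + 1) records, where each k-point contributes one header
        # record followed by nbands coefficient records; hence the (nbands + 1) stride.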
rec = 2 + (ispin - 1) * self._nkpts * (self._nbands + 1) + \
(ikpt - 1) * (self._nbands + 1) + \
iband
return rec
def checkIndex(self, ispin, ikpt, iband):
'''
Check if the index is valid!
'''
assert 1 <= ispin <= self._nspin, 'Invalid spin index!'
assert 1 <= ikpt <= self._nkpts, 'Invalid kpoint index!'
assert 1 <= iband <= self._nbands, 'Invalid band index!'
def TransitionDipoleMoment(self, ks_i, ks_j, norm=True,
realspace=False):
        '''
        Calculate the Transition Dipole Moment (TDM) between two KS states.
        If "realspace = False", the TDM is evaluated in momentum space
        according to the formula in:
            https://en.wikipedia.org/wiki/Transition_dipole_moment

            <psi_a| r |psi_b> = i * hbar / (m * (Eb - Ea)) * <psi_a| p |psi_b>
                              = hbar^2 / (m * (Eb - Ea)) * \sum_i Cai^* * Cbi * Gi

        Otherwise, the TDM is evaluated in real space.
        Note: |psi_a> and |psi_b> should be Bloch functions with the same k-vector.
        The KS states ks_i (ks_j) are specified by a list of indices (ispin, ikpt, iband).
        '''
ks_i = list(ks_i); ks_j = list(ks_j)
assert len(ks_i) == len(ks_j) == 3, 'Must be three indexes!'
assert ks_i[1] == ks_j[1], 'k-point of the two states differ!'
self.checkIndex(*ks_i)
self.checkIndex(*ks_j)
# energy differences between the two states
E1 = self._bands[ks_i[0]-1, ks_i[1]-1, ks_i[2]-1]
E2 = self._bands[ks_j[0]-1, ks_j[1]-1, ks_j[2]-1]
dE = E2 - E1
if realspace:
fx = np.linspace(0, 1, self._ngrid[0], endpoint=False)
fy = np.linspace(0, 1, self._ngrid[1], endpoint=False)
fz = np.linspace(0, 1, self._ngrid[2], endpoint=False)
Dx, Dy, Dz = np.meshgrid(fx, fy, fz, indexing='ij')
Rx, Ry, Rz = np.tensordot(self._Acell, [Dx, Dy, Dz], axes=[0,0])
fac = np.sqrt(np.prod(self._ngrid) / self._Omega)
phi_i = self.wfc_r(*ks_i, norm=True, ngrid=self._ngrid)
phi_j = self.wfc_r(*ks_j, norm=True, ngrid=self._ngrid)
pij = phi_i.conjugate() * phi_j
tdm = np.array([
np.sum(pij * Rx),
np.sum(pij * Ry),
np.sum(pij * Rz)
])
ovlap = pij.sum()
else:
# according to the above equation, G = 0 does NOT contribute to TDM.
gvec = np.dot(self.gvectors(ikpt=ks_i[1]), self._Bcell*TPI)
# planewave coefficients of the two states
phi_i = self.readBandCoeff(*ks_i, norm=norm)
phi_j = self.readBandCoeff(*ks_j, norm=norm)
tmp1 = phi_i.conjugate() * phi_j
ovlap = np.sum(tmp1)
if self._lgam:
tmp2 = phi_i * phi_j.conjugate()
# according to the above equation, G = 0 does NOT contribute to TDM.
tdm = (np.sum(tmp1[:,np.newaxis] * gvec, axis=0) -
np.sum(tmp2[:,np.newaxis] * gvec, axis=0)) / 2.
else:
tdm = np.sum(tmp1[:,np.newaxis] * gvec, axis=0)
tdm = 1j / (dE / (2*RYTOEV)) * tdm * AUTOA * AUTDEBYE
return E1, E2, dE, ovlap, tdm
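    # Hedged usage sketch (placeholder state indices; both states must share a k-point):
    #
    #   wav = vaspwfc('WAVECAR')
    #   E1, E2, dE, ovlap, tdm = wav.TransitionDipoleMoment([1, 1, 10], [1, 1, 11])
    #   print(dE, ovlap.real, np.abs(tdm)**2)   # tdm is scaled by AUTOA * AUTDEBYE, presumably Debye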
def inverse_participation_ratio(self, norm=True):
        '''
        Calculate the Inverse Participation Ratio (IPR) from the wavefunction.
        IPR is a measure of the localization of Kohn-Sham states. For a
        particular KS state \phi_j, it is defined as

            IPR(\phi_j) = \sum_n |\phi_j(n)|^4 / ( \sum_n |\phi_j(n)|^2 )^2

        where n runs over the real-space grid points.
        '''
self.ipr = np.zeros((self._nspin, self._nkpts, self._nbands, 3))
for ispin in range(self._nspin):
for ikpt in range(self._nkpts):
for iband in range(self._nbands):
phi_j = self.wfc_r(ispin+1, ikpt+1, iband+1,
norm=norm)
phi_j_abs = np.abs(phi_j)
print('Calculating IPR of #spin %4d, #kpt %4d, #band %4d' % (ispin+1, ikpt+1, iband+1))
                    self.ipr[ispin, ikpt, iband, 0] = self._kpath[ikpt] if self._kpath is not None else 0
self.ipr[ispin, ikpt, iband, 1] = self._bands[ispin, ikpt, iband]
self.ipr[ispin, ikpt, iband, 2] = np.sum(phi_j_abs**4) / np.sum(phi_j_abs**2)**2
np.save('ipr.npy', self.ipr)
return self.ipr
def elf(self, kptw, ngrid=None, warn=True):
'''
Calculate the electron localization function (ELF) from WAVECAR.
The following formula was extracted from VASP ELF.F:
            T     = -(hbar^2 / 2m) * Psi^* grad^2 Psi          kinetic-energy density
                    (ELF.F carries an extra factor of 2 here; I am not sure whether
                    it is needed, so a factor of 1 is used in this script)
            TCORR = (1/2) * (hbar^2 / 2m) * grad^2 rho         T + TCORR is the positive-definite
                                                               kinetic-energy density
            TBOS  = (1/4) * (hbar^2 / 2m) * |grad rho|^2 / rho
                                                               T of an ideal Bose gas
                                                               (= infimum of T + TCORR)
            DH    = (3/5) * (hbar^2 / 2m) * (3 pi^2)^(2/3) * rho^(5/3)
                                                               T of the homogeneous
                                                               non-interacting electron gas
            D     = T + TCORR - TBOS
            ELF   = 1 / (1 + (D / DH)^2)
REF:
1. Nature, 371, 683-686 (1994)
2. Becke and Edgecombe, J. Chem. Phys., 92, 5397(1990)
3. M. Kohout and A. Savin, Int. J. Quantum Chem., 60, 875-882(1996)
4. http://www2.cpfs.mpg.de/ELF/index.php?content=06interpr.txt
'''
if warn:
print("""
###################################################################
If you are using VESTA to view the resulting ELF, please rename the
output file as ELFCAR, otherwise there will be some error in the
isosurface plot!
When CHG*/PARCHG/*.vasp are read in to visualize isosurfaces and
sections, data values are divided by volume in the unit of bohr^3.
The unit of charge densities input by VESTA is, therefore, bohr^3.
For LOCPOT/ELFCAR files, volume data are kept intact.
You can turn off this warning by setting "warn=False" in the "elf"
method.
###################################################################
""")
# the k-point weights
kptw = np.array(kptw, dtype=float)
assert kptw.shape == (self._nkpts,), "K-point weights must be provided \
to calculate charge density!"
# normalization
kptw /= kptw.sum()
if ngrid is None:
ngrid = self._ngrid * 2
else:
ngrid = np.array(ngrid, dtype=int)
assert ngrid.shape == (3,)
        assert np.all(ngrid >= self._ngrid), \
            "Minimum FFT grid size: (%d, %d, %d)" % \
            (self._ngrid[0], self._ngrid[1], self._ngrid[2])
fx = [ii if ii < ngrid[0] // 2 + 1 else ii - ngrid[0]
for ii in range(ngrid[0])]
fy = [jj if jj < ngrid[1] // 2 + 1 else jj - ngrid[1]
for jj in range(ngrid[1])]
fz = [kk if kk < ngrid[2] // 2 + 1 else kk - ngrid[2]
for kk in range(ngrid[2])]
# plane-waves: Reciprocal coordinate
# indexing = 'ij' so that outputs are of shape (ngrid[0], ngrid[1], ngrid[2])
Dx, Dy, Dz = np.meshgrid(fx, fy, fz, indexing='ij')
# plane-waves: Cartesian coordinate
Gx, Gy, Gz = np.tensordot(self._Bcell * np.pi * 2, [Dx, Dy, Dz], axes=(0,0))
# the norm squared of the G-vectors
G2 = Gx**2 + Gy**2 + Gz**2
# k-points vectors in Cartesian coordinate
vkpts = np.dot(self._kvecs, self._Bcell * 2 * np.pi)
# normalization factor so that
# \sum_{ijk} | \phi_{ijk} | ^ 2 * volume / Ngrid = 1
normFac = np.sqrt(np.prod(ngrid) / self._Omega)
# electron localization function
ElectronLocalizationFunction = []
# Charge density
rho = np.zeros(ngrid, dtype=complex)
# Kinetic energy density
tau = np.zeros(ngrid, dtype=complex)
for ispin in range(self._nspin):
# initialization
rho[...] = 0.0
tau[...] = 0.0
for ikpt in range(self._nkpts):
# plane-wave G-vectors
igvec = self.gvectors(ikpt+1)
# for gamma-only version, complete the missing -G vectors
if self._lgam:
tmp = np.array([-k for k in igvec[1:]], dtype=int)
igvec = np.vstack([igvec, tmp])
# plane-wave G-vectors in Cartesian coordinate
rgvec = np.dot(igvec, self._Bcell * 2 * np.pi)
k = vkpts[ikpt] # k
gk = rgvec + k[np.newaxis,:] # G + k
gk2 = np.linalg.norm(gk, axis=1)**2 # | G + k |^2
for iband in range(self._nbands):
# omit the empty bands
if self._occs[ispin, ikpt, iband] == 0.0: continue
rspin = 2.0 if self._nspin == 1 else 1.0
weight = rspin * kptw[ikpt] * self._occs[ispin, ikpt, iband]
# if self._lgam:
# ########################################
# # slower
# ########################################
# # wavefunction in real space
# # VASP does NOT do normalization in elf.F
# phi_r = self.wfc_r(ispin=ispin+1, ikpt=ikpt+1,
# iband=iband+1,
# ngrid=ngrid,
# norm=False) * normFac
# # wavefunction in reciprocal space
# phi_q = np.fft.fftn(phi_r, norm='ortho')
# # grad^2 \phi in reciprocal space
# lap_phi_q = -gk2 * phi_q
# # grad^2 \phi in real space
# lap_phi_r = np.fft.ifftn(lap_phi_q, norm='ortho')
# else:
########################################
# faster
########################################
# wavefunction in reciprocal space
# VASP does NOT do normalization in elf.F
phi_q = self.readBandCoeff(ispin=ispin+1, ikpt=ikpt+1,
iband=iband+1,
norm=False)
# pad the missing planewave coefficients for -G vectors
if self._lgam:
tmp = [x.conj() for x in phi_q[1:]]
phi_q = np.concatenate([phi_q, tmp])
# Gamma only, divide a factor of sqrt(2.0) except for
# G=0
phi_q /= np.sqrt(2.0)
phi_q[0] *= np.sqrt(2.0)
# wavefunction in real space
phi_r = self.wfc_r(ispin=ispin+1, ikpt=ikpt+1,
iband=iband+1,
ngrid=ngrid,
gvec=igvec,
Cg=phi_q,
norm=True) * normFac
# grad^2 \phi in reciprocal space
lap_phi_q = -gk2 * phi_q
# grad^2 \phi in real space
lap_phi_r = self.wfc_r(ispin=ispin+1, ikpt=ikpt+1,
iband=iband+1,
ngrid=ngrid,
gvec=igvec,
Cg=lap_phi_q) * normFac
# \phi* grad^2 \phi in real space --> kinetic energy density
tau += -phi_r * lap_phi_r.conj() * weight
# charge density in real space
rho += phi_r.conj() * phi_r * weight
# charge density in reciprocal space
rho_q = np.fft.fftn(rho, norm='ortho')
# grad^2 rho: laplacian of charge density
lap_rho_q = -G2 * rho_q
lap_rho_r = np.fft.ifftn(lap_rho_q, norm='ortho')
# charge density gradient: grad rho
########################################
# wrong method for gradient using FFT
########################################
# grad_rho_x = np.fft.ifft(1j * Gx * np.fft.fft(rho, axis=0), axis=0)
# grad_rho_y = np.fft.ifft(1j * Gy * np.fft.fft(rho, axis=1), axis=1)
# grad_rho_z = np.fft.ifft(1j * Gz * np.fft.fft(rho, axis=2), axis=2)
########################################
# correct method for gradient using FFT
########################################
grad_rho_x = np.fft.ifftn(1j * Gx * rho_q, norm='ortho')
grad_rho_y = np.fft.ifftn(1j * Gy * rho_q, norm='ortho')
grad_rho_z = np.fft.ifftn(1j * Gz * rho_q, norm='ortho')
grad_rho_sq = np.abs(grad_rho_x)**2 \
+ np.abs(grad_rho_y)**2 \
+ np.abs(grad_rho_z)**2
rho = rho.real
tau = tau.real
lap_rho_r = lap_rho_r.real
Cf = 3./5 * (3.0 * np.pi**2)**(2./3)
Dh = np.where(rho > 0.0,
Cf * rho**(5./3),
0.0)
eps = 1E-8 / HSQDTM
Dh[Dh < eps] = eps
# D0 = T + TCORR - TBOS
D0 = tau + 0.5 * lap_rho_r - 0.25 * grad_rho_sq / rho
ElectronLocalizationFunction.append(1. / (1. + (D0 / Dh)**2))
return ElectronLocalizationFunction
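    # Hedged usage sketch for elf (the k-point weights below are placeholders and must
    # match the k-point list of the WAVECAR; a single-k-point run has one weight):
    #
    #   wav = vaspwfc('WAVECAR')
    #   chi = wav.elf(kptw=[1.0])[0]            # ELF of the first spin channel
    #   wav.save2vesta(chi, lreal=True)         # rename the output to ELFCAR for VESTA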
############################################################
if __name__ == '__main__':
# xx = vaspwfc('wavecar')
# phi = xx.wfc_r(1, 30, 17, ngrid=(28, 28, 252))
# xx.save2vesta(phi, poscar='POSCAR')
# xx = vaspwfc('./gamma/WAVECAR')
# phi = xx.wfc_r(1, 1, 317, ngrid=(60, 108, 160),
# gamma=True)
# xx.save2vesta(phi, poscar='./gamma/POSCAR',gamma=True)
# xx = vaspwfc('WAVECAR')
# dE, ovlap, tdm = xx.TransitionDipoleMoment([1,30,17], [1,30,18], norm=True)
# print dE, ovlap.real, np.abs(tdm)**2
# print xx._recl, xx._nspin, xx._rtag
# print xx._nkpts, xx._nbands, xx._encut
# print xx._Acell, xx._Bcell
# # print np.linalg.norm(xx._Acell, axis=1)
# print xx._ngrid
# print xx._bands[0,0,:]
# print xx._kvecs
# print xx._kpath
# b = xx.readBandCoeff(1,1,1)
# xx = np.savetxt('kaka.dat', xx.gvectors(2), fmt='%5d')
# gvec = xx.gvectors(1)
# gvec %= xx._ngrid[np.newaxis, :]
# print gvec
# ngrid=(28, 28, 252)
# phi = xx.wfc_r(1, 30, 17, ngrid=(28, 28, 252))
# header = open('POSCAR').read()
# with open('wave_real.vasp', 'w') as out:
# out.write(header)
# out.write('%5d%5d%5d\n' % (ngrid[0], ngrid[1], ngrid[2]))
# nwrite=0
# for kk in range(ngrid[2]):
# for jj in range(ngrid[1]):
# for ii in range(ngrid[0]):
# nwrite += 1
# out.write('%22.16f ' % phi.real[ii,jj,kk])
# if nwrite % 10 == 0:
# out.write('\n')
# with open('wave_imag.vasp', 'w') as out:
# out.write(header)
# out.write('%5d%5d%5d\n' % (ngrid[0], ngrid[1], ngrid[2]))
# nwrite=0
# for kk in range(ngrid[2]):
# for jj in range(ngrid[1]):
# for ii in range(ngrid[0]):
# nwrite += 1
# out.write('%22.16f ' % phi.imag[ii,jj,kk])
# if nwrite % 10 == 0:
# out.write('\n')
# xx = vaspwfc('wave_tyz')
# ipr = xx.inverse_participation_ratio()
# print xx._nbands, xx._nkpts
#
# import matplotlib as mpl
# import matplotlib.pyplot as plt
#
# fig = plt.figure()
# ax = plt.subplot()
#
# ax.scatter(ipr[...,0], ipr[..., 1], s=ipr[..., 2] / ipr[..., 2].max() * 10, c=ipr[..., 2],
# cmap='jet_r')
#
# plt.show()
wfc = vaspwfc('WAVECAR', lgamma=True, gamma_half='x')
# ngrid = [80, 140, 210]
phi = wfc.wfc_r(iband=190)
rho = np.abs(phi)**2
# rho2 = VaspChargeDensity('PARCHG.0158.ALLK').chg[0]
# rho /= rho.sum()
# rho2 /= rho2.sum()
# rho3 = rho - rho2
wfc.save2vesta(rho, lreal=True)
pass

View File

@@ -0,0 +1,528 @@
import numpy as np
import scipy.integrate as integrate
from scipy.optimize import minimize
import numbers
import typing
from tqdm import tqdm
from echem.core.useful_funcs import nearest_array_index, ClassMethods
E_F_SHE_VAC = -4.5 # Fermi Energy of Standard Hydrogen Electrode with respect to vacuum
class GM(ClassMethods):
"""This class calculates the final Fermi and Redox species distributions according
to the Gerischer-Marcus formalism.
Parameters:
-----------
DOS: np.ndarray, optional
        The DOS values as a 1D numpy array. If not specified, values will be taken from saved data.
    E: np.ndarray, optional
        The energy mesh corresponding to the DOS. If not specified, values will be taken from saved data.
    efermi: np.ndarray, optional
        System Fermi level. If not specified, it will be taken from saved data.
    vacuum_lvl: np.ndarray, optional
        System vacuum level. If not specified, it will be taken from saved data.
"""
def __init__(self, path_to_data='Saved_data', DOS=None, E=None, efermi=None, vacuum_lvl=None):
# variables that might be defined through __init__ function
self.E = E
self.DOS = DOS
self.efermi = efermi
self.vacuum_lvl = vacuum_lvl
# variables that should be defined through set_params function
self.C_EDL = None
self.T = None
self.l = None
self.sheet_area = None
# variables that will be created during calculations
self.sigma_Q_arr = None
# variable that define numerical parameters of quantum charge calculation
self.__SIGMA_0 = 0.5
self.__SIGMA_ACCURACY = 1e-3
self.__SIGMA_RANGE = 4
if DOS is None:
try:
self.DOS = np.load(path_to_data + '/DOS.npy')
except OSError:
print('File DOS.npy does not exist')
if E is None:
try:
self.E = np.load(path_to_data + '/E.npy')
except OSError:
                print('File E.npy does not exist')
if efermi is None:
try:
self.efermi = np.load(path_to_data + '/efermi.npy')
except OSError:
print('File efermi.npy does not exist')
if vacuum_lvl is None:
try:
self.vacuum_lvl = np.load(path_to_data + '/vacuum_lvl.npy')
except OSError:
print('File vacuum_lvl.npy does not exist')
def set_params(self, C_EDL, T, l, sheet_area):
"""Sets parameters of calculation
Parameters:
----------
C_EDL: float, str
float: Capacitance of electric double layer (microF/cm^2)
str: 'Q' calculating in the Quantum Capacitance Dominating limit (C_Q << C_EDL)
str: 'Cl' calculating in the Classical limit (C_Q >> C_EDL)
T: int, float
Temperature. It is used in computing Fermi function and distribution function of redox system states
l: float
Reorganization energy in eV
"""
self.C_EDL = C_EDL
self.T = T
self.l = l
self.sheet_area = sheet_area
def set_params_advance(self, SIGMA_0=0.5, ACCURACY_SIGMA=1e-3, SIGMA_RANGE=4):
"""
        Sets numerical parameters that are used in quantum charge density calculations. Deletes cached
        results of charge calculations.
Args:
SIGMA_0: float, optional
Initial guess for charge at equilibrium
ACCURACY_SIGMA: float, optional
Accuracy of charge calculation
SIGMA_RANGE: float, optional
It defines the minimum and maximum calculated charge
"""
self.__SIGMA_0 = SIGMA_0
self.__SIGMA_ACCURACY = ACCURACY_SIGMA
self.__SIGMA_RANGE = SIGMA_RANGE
self.sigma_Q_arr = None
@staticmethod
def fermi_func(E, T):
"""
Calculates Fermi-Dirac Distribution
Args:
E: Energies
T: Temperature in K
"""
k = 8.617e-5 # eV/K
return 1 / (1 + np.exp(E / (k * T)))
@staticmethod
def W_ox(E, T, l):
"""
Distribution of oxidized states
Args:
E (np.array): Energies
T (float): Temperature
l (float): Reorganization energy
"""
k = 8.617e-5 # eV/K
W_0 = (1 / np.sqrt(4 * k * T * l))
return W_0 * np.exp(- (E - l) ** 2 / (4 * k * T * l))
@staticmethod
def W_red(E, T, l):
"""
Distribution of reduced states
Args:
E (np.array): Energies
T (float): Temperature
l (float): Reorganization energy
"""
k = 8.617e-5 # eV/K
W_0 = (1 / np.sqrt(4 * k * T * l))
return W_0 * np.exp(- (E + l) ** 2 / (4 * k * T * l))
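    # Hedged note: W_ox and W_red are Marcus Gaussians of variance 2*k*T*l centred at
    # +l and -l (energies measured relative to the Fermi level of the redox couple).
    # As written, the conventional 1/sqrt(pi) prefactor is omitted, so each curve
    # integrates to sqrt(pi) rather than 1, e.g.:
    #
    #   E = np.linspace(-5, 5, 2001)
    #   print(np.trapz(GM.W_ox(E, T=298, l=0.9), E))   # ~ 1.77 ~ sqrt(pi)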
def compute_C_quantum(self, dE_Q_arr):
"""
Calculates differential quantum capacitance
Q = e * int{DOS(E) * [f(E) - f(E + deltaE)] dE}
C_Q = - dQ/d(deltaE) = - (e / (4*k*T)) * int{DOS(E) * sech^2[(E+deltaE)/(2*k*T)] dE}
Args:
dE_Q_arr (np.array, float): Energy shift at which C_Q is calculated
Returns:
            Quantum capacitance for the given energy displacement(s)
TODO check constants
"""
self.check_existence('T')
self.check_existence('sheet_area')
k = 8.617e-5 # eV/K
elementary_charge = 1.6e-19 # C
k_1 = 1.38e-23 # J/K
const = (1e6 * elementary_charge ** 2) / (4 * k_1 * self.sheet_area) # micro F / cm^2
if isinstance(dE_Q_arr, typing.Iterable):
C_q_arr = np.zeros_like(dE_Q_arr)
for i, dE_Q in enumerate(dE_Q_arr):
E_2 = self.E - dE_Q # energy range for cosh function
cosh = np.cosh(E_2 / (2 * k * self.T))
integrand = (self.DOS / cosh) / cosh
C_q = (const / self.T) * integrate.simps(integrand, self.E)
C_q_arr[i] = C_q
return C_q_arr
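    # Hedged sketch: C_Q for a toy constant DOS (all numbers are illustrative; in real
    # use DOS, E, efermi and vacuum_lvl come from the Saved_data/*.npy files):
    #
    #   E = np.linspace(-5, 5, 2001)
    #   gm = GM(DOS=np.ones_like(E), E=E, efermi=0.0, vacuum_lvl=4.5)
    #   gm.set_params(C_EDL=20.0, T=298, l=0.9, sheet_area=1e-14)
    #   print(gm.compute_C_quantum(np.array([-0.2, 0.0, 0.2])))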
def compute_C_total(self, E_diff_arr, add_info=False):
sigma_arr = np.zeros_like(E_diff_arr)
for i, E_diff in tqdm(enumerate(E_diff_arr), total=len(E_diff_arr)):
sigma_arr[i] = self.compute_sigma(E_diff, sigma_0=sigma_arr[i-1])
C_tot_arr = np.zeros_like(E_diff_arr)
C_Q_arr = np.zeros_like(E_diff_arr)
for i, (E_diff, sigma) in enumerate(zip(E_diff_arr, sigma_arr)):
ind = nearest_array_index(self.sigma_Q_arr, sigma)
E_step = self.__SIGMA_ACCURACY
E_start = - self.__SIGMA_RANGE
dE_Q = E_start + E_step * ind
C_Q = self.compute_C_quantum([dE_Q])
C_Q_arr[i] = C_Q[0]
C_tot = C_Q * self.C_EDL / (C_Q + self.C_EDL)
C_tot_arr[i] = C_tot
if add_info is False:
return C_tot_arr
else:
return C_tot_arr, C_Q_arr, sigma_arr
def compute_sigma_EDL(self, dE_EDL):
"""
Calculates charge corresponding to the potential drop of -dE_EDL/|e|.
Takes into account integral capacitance C_EDL
Args:
dE_EDL (float, np.array): Electron energy shift due to potential drop
Returns:
Charge or Sequence of charges
"""
self.check_existence('C_EDL')
return - self.C_EDL * dE_EDL
def compute_sigma_quantum(self, dE_Q_arr):
"""
Computes surface charge density induced by depletion or excess of electrons
Parameters:
----------
dE_Q_arr: np.ndarray, float
Shift in Fermi level due to quantum capacitance
Returns:
-------
sigmas: np.ndarray, float
Computed values (or one value) of surface charge densities
"""
self.check_existence('T')
self.check_existence('sheet_area')
elementary_charge = 1.6e-13 # micro coulomb
if isinstance(dE_Q_arr, typing.Iterable):
y_fermi = self.fermi_func(self.E, self.T)
sigmas = []
for dE_Q in dE_Q_arr:
E_2 = self.E - dE_Q # energy range for shifted Fermi_Dirac function
y_fermi_shifted = self.fermi_func(E_2, self.T)
integrand = self.DOS * (y_fermi - y_fermi_shifted)
sigma = (elementary_charge / self.sheet_area) * integrate.simps(integrand, self.E)
sigmas.append(sigma)
return sigmas
elif isinstance(dE_Q_arr, numbers.Real):
y_fermi = self.fermi_func(self.E, self.T)
E_2 = self.E - dE_Q_arr # energy range for shifted Fermi_Dirac function
y_fermi_shifted = self.fermi_func(E_2, self.T)
integrand = self.DOS * (y_fermi - y_fermi_shifted)
sigma = (elementary_charge / self.sheet_area) * integrate.simps(integrand, self.E)
return sigma
else:
raise TypeError(f'Invalid type of dE_Q_arr: {type(dE_Q_arr)}')
def compute_sigma(self, E_diff, sigma_0=None):
def error_E_diff(sigma, E_diff, sigma_Q_arr):
ind = nearest_array_index(sigma_Q_arr, sigma)
dE_Q = E_start + E_step * ind
dE_EDL = - sigma / self.C_EDL
dE_total = dE_Q + dE_EDL
return (dE_total - E_diff) ** 2
for var in ['T', 'l', 'C_EDL']:
self.check_existence(var)
E_step = self.__SIGMA_ACCURACY
E_start = - self.__SIGMA_RANGE
if sigma_0 is None:
sigma_0 = self.__SIGMA_0
# check if we've already calculated sigma_Q_arr in another run
if self.sigma_Q_arr is None:
E_range = np.arange(E_start, -E_start, E_step)
sigma_Q_arr = self.compute_sigma_quantum(E_range)
self.sigma_Q_arr = sigma_Q_arr
else:
sigma_Q_arr = self.sigma_Q_arr
result = minimize(error_E_diff, np.array([sigma_0]), args=(E_diff, sigma_Q_arr))
sigma = result.x[0]
return sigma
def compute_distributions(self, V_std, overpot=0, reverse=False, add_info=False):
"""Computes Fermi-Dirac and Redox species distributions according to Gerischer-Markus formalism
with Quantum Capacitance
Parameters:
----------
V_std: float
Standard potential (vs SHE) of a redox couple (Volts)
overpot: float, optional
Overpotential (Volts). It shifts the electrode Fermi energy to -|e|*overpot
reverse: bool, optional
If reverse is False the process of electron transfer from electrode to the oxidized state of the
redox species is considered and vice versa
add_info: bool, optional
If False the func returns Fermi-Dirac and Redox species distributions
If True additionally returns dE_Q (Fermi energy shift due to the quantum capacitance),
sigma (surface charge) and E_diff (the whole energy shift with respect to the original Fermi level)
Returns:
-------
y_fermi: np.array
Fermi-Dirac distribution
y_redox: np.array
Redox species distributions
dE_Q: np.array, optional (if add_info == True)
Total shift of the Fermi energy due to the Quantum Capacitance
sigma: np.array, optional (if add_info == True)
            surface charge density in microC/cm^2
        E_F_redox: np.array, optional (if add_info == True)
            The total energy shift of the electrode due to the difference between the Fermi levels of the redox
            couple and the electrode, plus the overpotential. It splits into dE_Q and dE_EDL
"""
E_F_redox = E_F_SHE_VAC - self.efermi - V_std + self.vacuum_lvl - overpot
sigma = self.compute_sigma(E_F_redox)
ind = nearest_array_index(self.sigma_Q_arr, sigma)
E_step = self.__SIGMA_ACCURACY
E_start = - self.__SIGMA_RANGE
dE_Q = E_start + E_step * ind
E_fermi = self.E - dE_Q
E_DOS_redox = self.E - dE_Q - overpot
if reverse:
y_fermi = 1 - self.fermi_func(E_fermi, self.T)
y_redox = self.W_red(E_DOS_redox, self.T, self.l)
else:
y_fermi = self.fermi_func(E_fermi, self.T)
y_redox = self.W_ox(E_DOS_redox, self.T, self.l)
if not add_info:
return y_fermi, y_redox
else:
return y_fermi, y_redox, dE_Q, sigma, E_F_redox
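    # Hedged usage sketch (placeholder potentials, in Volts):
    #
    #   y_f, y_red = gm.compute_distributions(V_std=0.2, overpot=-0.1)
    #   integrand = gm.DOS * y_f * y_red   # integrand of the Gerischer-Marcus rate integral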
def compute_k_HET(self, V_std_pot_arr, overpot_arr, reverse=False, add_info=False):
"""Computes integral k_HET using Gerischer-Markus formalism with quantum capacitance
Parameters:
----------
        V_std_pot_arr: float, np.ndarray
            A range of standard potentials (or a single value)
        overpot_arr: float, np.ndarray
            A range of overpotentials (or a single value)
reverse: bool, optional
if reverse is False the process of electron transfer from electrode to the oxidized state of the
redox mediator is considered and vice versa
Returns:
-------
k_HET: np.array
Calculated heterogeneous electron transfer rate constant according to Gerischer-Marcus model with quantum
capacitance
dE_Q_arr: np.ndarray, optional (if add_info == True)
Total shift of the Fermi energy due to the Quantum Capacitance for all calculated redox potentials or
overpotentials
sigma_arr: np.ndarray, optional (if add_info == True)
surface charge in microF/cm^2 for all calculated redox potentials or overpotentials
E_F_redox_arr: np.ndarray, optional (if add_info == True)
            The total energy shift of the electrode due to the difference between the Fermi levels of the redox
            couple and the electrode, plus the overpotential. It splits into dE_Q and dE_EDL. Given for all
            calculated redox potentials or overpotentials
y_fermi_arr: 2D np.ndarray, optional (if add_info == True)
Fermi-Dirac distribution for all calculated redox potentials or overpotentials
y_redox_arr: 2D np.ndarray, optional (if add_info == True)
Redox species distributions for all calculated redox potentials or overpotentials
"""
if isinstance(self.C_EDL, numbers.Real):
if isinstance(V_std_pot_arr, typing.Iterable) and isinstance(overpot_arr, numbers.Real):
k_HET = np.zeros_like(V_std_pot_arr)
if not add_info:
for i, V_std in tqdm(enumerate(V_std_pot_arr), total=len(V_std_pot_arr)):
y_fermi, y_redox = self.compute_distributions(V_std, reverse=reverse, overpot=overpot_arr)
integrand = self.DOS * y_fermi * y_redox
k_HET[i] = integrate.simps(integrand, self.E) / self.sheet_area
return k_HET
else:
dE_Q_arr = np.zeros_like(V_std_pot_arr)
sigma_arr = np.zeros_like(V_std_pot_arr)
E_F_redox_arr = np.zeros_like(V_std_pot_arr)
y_fermi_arr = np.zeros((len(V_std_pot_arr), len(self.E)))
y_redox_arr = np.zeros((len(V_std_pot_arr), len(self.E)))
for i, V_std in tqdm(enumerate(V_std_pot_arr), total=len(V_std_pot_arr)):
y_fermi, y_redox, dE_Q, sigma, E_F_redox = self.compute_distributions(V_std, reverse=reverse,
overpot=overpot_arr,
add_info=add_info)
integrand = self.DOS * y_fermi * y_redox
k_HET[i] = integrate.simps(integrand, self.E) / self.sheet_area
dE_Q_arr[i] = dE_Q
sigma_arr[i] = sigma
E_F_redox_arr[i] = E_F_redox
y_fermi_arr[i] = y_fermi
y_redox_arr[i] = y_redox
return k_HET, dE_Q_arr, sigma_arr, E_F_redox_arr, y_fermi_arr, y_redox_arr
elif isinstance(overpot_arr, typing.Iterable) and isinstance(V_std_pot_arr, numbers.Real):
k_HET = np.zeros_like(overpot_arr)
if not add_info:
for i, overpot in tqdm(enumerate(overpot_arr), total=len(overpot_arr)):
y_fermi, y_redox = self.compute_distributions(V_std_pot_arr, reverse=reverse, overpot=overpot)
integrand = self.DOS * y_fermi * y_redox
k_HET[i] = integrate.simps(integrand, self.E) / self.sheet_area
return k_HET
else:
dE_Q_arr = np.zeros_like(overpot_arr)
sigma_arr = np.zeros_like(overpot_arr)
E_F_redox_arr = np.zeros_like(overpot_arr)
y_fermi_arr = np.zeros((len(overpot_arr), len(self.E)))
y_redox_arr = np.zeros((len(overpot_arr), len(self.E)))
for i, overpot in tqdm(enumerate(overpot_arr), total=len(overpot_arr)):
y_fermi, y_redox, dE_Q, sigma, E_F_redox = self.compute_distributions(V_std_pot_arr,
reverse=reverse,
overpot=overpot,
add_info=add_info)
integrand = self.DOS * y_fermi * y_redox
k_HET[i] = integrate.simps(integrand, self.E) / self.sheet_area
dE_Q_arr[i] = dE_Q
sigma_arr[i] = sigma
E_F_redox_arr[i] = E_F_redox
y_fermi_arr[i] = y_fermi
y_redox_arr[i] = y_redox
return k_HET, dE_Q_arr, sigma_arr, E_F_redox_arr, y_fermi_arr, y_redox_arr
else:
                raise ValueError('Exactly one of V_std_pot_arr and overpot_arr must be an iterable; '
                                 'the other must be a real number')
elif self.C_EDL == 'Cl':
if isinstance(V_std_pot_arr, typing.Iterable) and isinstance(overpot_arr, numbers.Real):
E_fermi = self.E
E_DOS_redox = self.E - overpot_arr
if reverse:
y_fermi = 1 - self.fermi_func(E_fermi, self.T)
y_redox = self.W_red(E_DOS_redox, self.T, self.l)
else:
y_fermi = self.fermi_func(E_fermi, self.T)
y_redox = self.W_ox(E_DOS_redox, self.T, self.l)
integrand = self.DOS * y_fermi * y_redox
k_HET = np.ones_like(V_std_pot_arr) * integrate.simps(integrand, self.E)
return k_HET
            elif isinstance(overpot_arr, typing.Iterable) and isinstance(V_std_pot_arr, numbers.Real):
k_HET = np.zeros_like(overpot_arr)
for i, overpot in tqdm(enumerate(overpot_arr), total=len(overpot_arr)):
E_fermi = self.E
E_DOS_redox = self.E - overpot
if reverse:
y_fermi = 1 - self.fermi_func(E_fermi, self.T)
y_redox = self.W_red(E_DOS_redox, self.T, self.l)
else:
y_fermi = self.fermi_func(E_fermi, self.T)
y_redox = self.W_ox(E_DOS_redox, self.T, self.l)
integrand = self.DOS * y_fermi * y_redox
k_HET[i] = integrate.simps(integrand, self.E)
return k_HET
else:
                raise ValueError('Exactly one of V_std_pot_arr and overpot_arr must be an iterable; '
                                 'the other must be a real number')
elif self.C_EDL == 'Q':
if isinstance(V_std_pot_arr, typing.Iterable) and isinstance(overpot_arr, numbers.Real):
k_HET = np.zeros_like(V_std_pot_arr)
for i, V_std in tqdm(enumerate(V_std_pot_arr), total=len(V_std_pot_arr)):
E_F_redox = E_F_SHE_VAC - self.efermi - V_std + self.vacuum_lvl
E_DOS_redox = self.E - E_F_redox
E_fermi = E_DOS_redox - overpot_arr
if reverse:
y_fermi = 1 - self.fermi_func(E_fermi, self.T)
y_redox = self.W_red(E_DOS_redox, self.T, self.l)
else:
y_fermi = self.fermi_func(E_fermi, self.T)
y_redox = self.W_ox(E_DOS_redox, self.T, self.l)
integrand = self.DOS * y_fermi * y_redox
k_HET[i] = integrate.simps(integrand, self.E)
return k_HET
elif isinstance(overpot_arr, typing.Iterable) and isinstance(V_std_pot_arr, numbers.Real):
k_HET = np.zeros_like(overpot_arr)
for i, overpot in tqdm(enumerate(overpot_arr), total=len(overpot_arr)):
E_F_redox = E_F_SHE_VAC - self.efermi - V_std_pot_arr + self.vacuum_lvl - overpot
E_fermi = self.E - E_F_redox
E_DOS_redox = self.E - E_F_redox - overpot
if reverse:
y_fermi = 1 - self.fermi_func(E_fermi, self.T)
y_redox = self.W_red(E_DOS_redox, self.T, self.l)
else:
y_fermi = self.fermi_func(E_fermi, self.T)
y_redox = self.W_ox(E_DOS_redox, self.T, self.l)
integrand = self.DOS * y_fermi * y_redox
k_HET[i] = integrate.simps(integrand, self.E)
return k_HET
else:
                raise ValueError('Exactly one of V_std_pot_arr and overpot_arr must be an iterable; '
                                 'the other must be a real number')
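# Minimal end-to-end sketch (hypothetical parameter values; assumes the Saved_data/*.npy
# files produced by the preprocessing step are present in the working directory):
if __name__ == '__main__':
    gm = GM(path_to_data='Saved_data')
    gm.set_params(C_EDL=20.0, T=298, l=0.9, sheet_area=1e-14)
    overpots = np.linspace(-0.5, 0.5, 11)
    k_het = gm.compute_k_HET(V_std_pot_arr=0.0, overpot_arr=overpots)
    print(k_het)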

View File

@@ -0,0 +1,514 @@
import numpy as np
from . import tip_types
from .GerischerMarkus import GM
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
from mpl_toolkits.axes_grid1 import make_axes_locatable
from pymatgen.io.vasp import Procar
from echem.core import constants
from echem.io_data import vasp
class kHET:
"""
This class calculates heterogeneous electron transfer rate constant with spatial resolution
"""
# TODO update tips types
AVAILABLE_TIPS_TYPES = ['oxygen', 'IrCl6', 'RuNH3_6', 'RuNH3_6_NNN_plane', 'RuNH3_6_perpendicular',
'oxygen_parallel_x', 'oxygen_parallel_y']
def __init__(self, working_folder=''):
if working_folder == '':
working_folder = '.'
# TODO think about better than get class objects for outcar and poscar info
self.outcar = vasp.Outcar.from_file(working_folder + '/OUTCAR')
self.poscar = vasp.Poscar.from_file(working_folder + '/POSCAR')
self.procar = Procar(working_folder + '/PROCAR')
self.working_folder = working_folder
self.path_to_data = working_folder + '/Saved_data'
self.wavecar = None
self.C_EDL = None
self.T = None
self.lambda_ = None
self.sheet_area = None
self.V_std = None
self.overpot = None
self.dE_Q = None
self.kb_array = None
self.E = None
def set_parameters(self, T, lambda_, overpot=0, V_std=None, C_EDL=None, dE_Q=None, linear_constant=26, threshold_value=1e-5, shifted_DOS='all'):
"""
:param C_EDL: float
Capacitance of electric double layer (microF/cm^2)
:param T: int, float
Temperature. It is used in computing Fermi function and distribution function of redox system states
:param lambda_: float
Reorganization energy in eV
:param V_std: float
            Standard potential of the redox couple (Volts)
:param overpot: float
Overpotential (Volts). It shifts the electrode Fermi energy to -|e|*overpot
:return:
"""
#TODO check get_DOS parameters
self.E, DOS = self.outcar.get_DOS(zero_at_fermi=True, smearing='Gaussian', sigma=0.1, dE = 0.01)
self.T = T
self.lambda_ = lambda_
self.overpot = overpot
self.sheet_area = self._get_sheet_area() # Area of investigated surface(XY) in cm^2
self.linear_constant = linear_constant
        if V_std is None or C_EDL is None:
            if dE_Q is None:
                raise ValueError("Set either V_std and C_EDL or the dE_Q parameter")
else:
self.dE_Q = dE_Q
else:
self.V_std = V_std
self.C_EDL = C_EDL
self.dE_Q = self._calculate_dE_Q()
if shifted_DOS == 'all':
self.are_frozen_states = False
self.kb_array = self._get_kb_array(threshold_value)
else:
self.are_frozen_states = True
self.kb_array = self._get_kb_array(threshold_value, shifted_DOS)
        if not self.kb_array:
            print('Error! kb_array is empty. Try to decrease threshold_value')
def load_wavecar(self):
self.wavecar = vasp.Wavecar.from_file(self.working_folder + '/WAVECAR', self.kb_array)
def plot_distributions(self):
#TODO make it
pass
def _get_atom_localization(self, kpoint, band, target_atom_types):
for key in self.procar.data.keys():
procar_data = self.procar.data[key]
list_of_ions = []
atomnames = self.poscar.structure.species
orbital_names = self.procar.orbitals
for i, name in enumerate(atomnames):
if name in target_atom_types:
list_of_ions.append(i)
list_of_orbitals = [i for i in range(len(orbital_names))]
localization = 0
for ion in list_of_ions:
for orb in list_of_orbitals:
localization += procar_data[kpoint - 1][band - 1][ion][orb]
return localization
def _get_kb_array(self, threshold_value, shifted_DOS='all', threshold_for_localization_to_be_shifted=0.4):
if shifted_DOS=='all':
fermi_distribution = GM.fermi_func(self.E - self.dE_Q, self.T)
W_ox = GM.W_ox(self.E - self.dE_Q - self.overpot, self.T, self.lambda_)
E_satisfy_mask = W_ox * fermi_distribution > np.max(W_ox) * np.max(fermi_distribution) * threshold_value
list_of_E_indices = [i for i, E in enumerate(E_satisfy_mask) if E]
min_E_ind = min(list_of_E_indices)
max_E_ind = max(list_of_E_indices)
Erange = [self.E[min_E_ind], self.E[max_E_ind]]
kb_array = []
for band in range(1, self.outcar.nbands + 1):
for kpoint in range(1, self.outcar.nkpts + 1):
energy = self.outcar.eigenvalues[0][kpoint - 1][band - 1] - self.outcar.efermi
if energy >= Erange[0] and energy < Erange[1]:
kb_array.append([kpoint, band])
else:
fermi_distribution_shifted = GM.fermi_func(self.E - self.dE_Q, self.T)
W_ox_shifted = GM.W_ox(self.E - self.dE_Q - self.overpot, self.T, self.lambda_)
E_satisfy_mask_shifted = W_ox_shifted * fermi_distribution_shifted > np.max(W_ox_shifted) * np.max(
fermi_distribution_shifted) * threshold_value
list_of_E_indices_shifted = [i for i, E in enumerate(E_satisfy_mask_shifted) if E]
min_E_ind_shifted = min(list_of_E_indices_shifted)
max_E_ind_shifted = max(list_of_E_indices_shifted)
Erange_shifted = [self.E[min_E_ind_shifted], self.E[max_E_ind_shifted]]
fermi_distribution_frozen = GM.fermi_func(self.E - 0, self.T)
W_ox_frozen = GM.W_ox(self.E - 0 - self.overpot, self.T, self.lambda_)
E_satisfy_mask_frozen = W_ox_frozen * fermi_distribution_frozen > np.max(W_ox_frozen) * np.max(
fermi_distribution_frozen) * threshold_value
list_of_E_indices_frozen = [i for i, E in enumerate(E_satisfy_mask_frozen) if E]
min_E_ind_frozen = min(list_of_E_indices_frozen)
max_E_ind_frozen = max(list_of_E_indices_frozen)
Erange_frozen = [self.E[min_E_ind_frozen], self.E[max_E_ind_frozen]]
kb_array = []
self.frozen_mask = []
for band in range(1, self.outcar.nbands + 1):
for kpoint in range(1, self.outcar.nkpts + 1):
energy = self.outcar.eigenvalues[0][kpoint - 1][band - 1] - self.outcar.efermi
if energy >= Erange_frozen[0] and energy < Erange_frozen[1]:
if self._get_atom_localization(kpoint, band,
target_atom_types=shifted_DOS) < threshold_for_localization_to_be_shifted:
kb_array.append([kpoint, band])
self.frozen_mask.append(1)
if energy >= Erange_shifted[0] and energy < Erange_shifted[1]:
if self._get_atom_localization(kpoint, band,
target_atom_types=shifted_DOS) >= threshold_for_localization_to_be_shifted:
kb_array.append([kpoint, band])
self.frozen_mask.append(0)
return kb_array
def calculate_kHET_spatial(self, tip_type='s', z_pos=None, from_center=False, cutoff_in_Angstroms=5, all_z=False, dim='2D', shifted_separately=False):
xlen, ylen, zlen = np.shape(self.wavecar.wavefunctions[0])
if dim == '2D':
k_HET_ = np.zeros((xlen, ylen))
            if z_pos is None:
                raise ValueError("For dim='2D', z_pos is an obligatory parameter")
b1, b2, b3 = self.poscar.structure.lattice
bn1 = b1 / xlen
bn2 = b2 / ylen
bn3 = b3 / zlen
if b3[0] != 0.0 or b3[1] != 0.0:
print("WARNING! You z_vector is not perpendicular to XY plane, Check calculate_kHET_spatial_2D function")
if from_center:
z = int(zlen // 2 + z_pos // np.linalg.norm(bn3))
else:
z = int(z_pos // np.linalg.norm(bn3))
real_z_position = z_pos // np.linalg.norm(bn3) * np.linalg.norm(bn3)
print(f"Real z_pos of tip = {real_z_position}")
if any(tip_type == kw for kw in self.AVAILABLE_TIPS_TYPES):
cutoff = int(cutoff_in_Angstroms // np.linalg.norm(bn1))
if cutoff > xlen // 2 or cutoff > ylen // 2:
print("ERROR: Cutoff should be less than 1/2 of each dimension of the cell. "
"Try to reduce cutoff. Otherwise, result could be unpredictible")
acc_orbitals = self.generate_acceptor_orbitals(tip_type, (xlen, ylen, zlen), z_shift=z)
if all_z == True:
zmin = 0
zmax = zlen
elif z - cutoff >= 0 and z + cutoff <= zlen:
zmin = z - cutoff
zmax = z + cutoff
else:
print("Can't reduce integrating area in z dimention. "
"Will calculate overlapping for all z. You can ignore this message "
"if you don't care about efficiency")
zmin = 0
zmax = zlen
for i, kb in enumerate(self.wavecar.kb_array):
kpoint, band = kb[0], kb[1]
energy = self.outcar.eigenvalues[0][kpoint - 1][band - 1] - self.outcar.efermi
weight = self.outcar.weights[0][kpoint - 1]
f_fermi = GM.fermi_func(energy - self.dE_Q, self.T)
w_redox = GM.W_ox(energy - self.dE_Q - self.overpot, self.T, self.lambda_)
overlap_integrals_squared = self._get_overlap_integrals_squared(self.wavecar.wavefunctions[i], cutoff,
acc_orbitals, zmin, zmax)
# TODO check eq below
matrix_elements_squared = overlap_integrals_squared * self.linear_constant * np.linalg.norm(bn3) ** 3
k_HET_ += matrix_elements_squared * f_fermi * w_redox * weight
elif tip_type == 's':
for i, kb in enumerate(self.wavecar.kb_array):
kpoint, band = kb[0], kb[1]
energy = self.outcar.eigenvalues[0][kpoint - 1][band - 1] - self.outcar.efermi
weight = self.outcar.weights[0][kpoint - 1]
f_fermi = GM.fermi_func(energy - self.dE_Q, self.T)
w_redox = GM.W_ox(energy - self.dE_Q - self.overpot, self.T, self.lambda_)
matrix_elements_squared = np.abs(self.wavecar.wavefunctions[i][:, :, z]) ** 2
k_HET_ += matrix_elements_squared * f_fermi * w_redox * weight
elif tip_type == 'pz':
for i, kb in enumerate(self.wavecar.kb_array):
kpoint, band = kb[0], kb[1]
energy = self.outcar.eigenvalues[0][kpoint - 1][band - 1] - self.outcar.efermi
weight = self.outcar.weights[0][kpoint - 1]
f_fermi = GM.fermi_func(energy - self.dE_Q, self.T)
w_redox = GM.W_ox(energy - self.dE_Q - self.overpot, self.T, self.lambda_)
wf_grad_z = np.gradient(self.wavecar.wavefunctions[i], axis=2)
matrix_elements_squared = np.abs(wf_grad_z[:, :, z]) ** 2
k_HET_ += matrix_elements_squared * f_fermi * w_redox * weight
else:
print(f"Try another tip type, for now {tip_type} is unavailiable")
elif dim == '3D':
k_HET_ = np.zeros((xlen, ylen, zlen))
if tip_type == 's':
if self.are_frozen_states:
if shifted_separately:
k_HET_shifted = np.zeros((xlen, ylen, zlen))
for i, kb in enumerate(self.wavecar.kb_array):
kpoint, band = kb[0], kb[1]
energy = self.outcar.eigenvalues[0][kpoint - 1][band - 1] - self.outcar.efermi
                        weight = self.outcar.weights[0][kpoint - 1]
if self.frozen_mask[i] == 1:
f_fermi = GM.fermi_func(energy, self.T)
w_redox = GM.W_ox(energy - self.overpot, self.T, self.lambda_)
matrix_elements_squared = np.abs(self.wavecar.wavefunctions[i]) ** 2
k_HET_ += matrix_elements_squared * f_fermi * w_redox * weight
else:
f_fermi = GM.fermi_func(energy - self.dE_Q, self.T)
w_redox = GM.W_ox(energy - self.dE_Q - self.overpot, self.T, self.lambda_)
matrix_elements_squared = np.abs(self.wavecar.wavefunctions[i]) ** 2
k_HET_ += matrix_elements_squared * f_fermi * w_redox * weight
                            if shifted_separately:
                                k_HET_shifted += matrix_elements_squared * f_fermi * w_redox * weight
else:
for i, kb in enumerate(self.wavecar.kb_array):
kpoint, band = kb[0], kb[1]
energy = self.outcar.eigenvalues[0][kpoint - 1][band - 1] - self.outcar.efermi
weight = self.outcar.weights[0][kpoint - 1]
f_fermi = GM.fermi_func(energy - self.dE_Q, self.T)
w_redox = GM.W_ox(energy - self.dE_Q - self.overpot, self.T, self.lambda_)
matrix_elements_squared = np.abs(self.wavecar.wavefunctions[i]) ** 2
k_HET_ += matrix_elements_squared * f_fermi * w_redox * weight
elif tip_type == 'pz':
if self.are_frozen_states:
if shifted_separately:
k_HET_shifted = np.zeros((xlen, ylen, zlen))
for i, kb in enumerate(self.wavecar.kb_array):
kpoint, band = kb[0], kb[1]
energy = self.outcar.eigenvalues[0][kpoint - 1][band - 1] - self.outcar.efermi
weight = self.outcar.weights[0][kpoint - 1]
if self.frozen_mask[i] == 1:
f_fermi = GM.fermi_func(energy, self.T)
w_redox = GM.W_ox(energy - self.overpot, self.T, self.lambda_)
wf_grad_z = np.gradient(self.wavecar.wavefunctions[i], axis=2)
matrix_elements_squared = np.abs(wf_grad_z) ** 2
k_HET_ += matrix_elements_squared * f_fermi * w_redox * weight
else:
f_fermi = GM.fermi_func(energy - self.dE_Q, self.T)
w_redox = GM.W_ox(energy - self.dE_Q - self.overpot, self.T, self.lambda_)
wf_grad_z = np.gradient(self.wavecar.wavefunctions[i], axis=2)
matrix_elements_squared = np.abs(wf_grad_z) ** 2
k_HET_ += matrix_elements_squared * f_fermi * w_redox * weight
                            if shifted_separately:
                                k_HET_shifted += matrix_elements_squared * f_fermi * w_redox * weight
else:
for i, kb in enumerate(self.wavecar.kb_array):
kpoint, band = kb[0], kb[1]
energy = self.outcar.eigenvalues[0][kpoint - 1][band - 1] - self.outcar.efermi
weight = self.outcar.weights[0][kpoint - 1]
f_fermi = GM.fermi_func(energy - self.dE_Q, self.T)
w_redox = GM.W_ox(energy - self.dE_Q - self.overpot, self.T, self.lambda_)
wf_grad_z = np.gradient(self.wavecar.wavefunctions[i], axis=2)
matrix_elements_squared = np.abs(wf_grad_z) ** 2
k_HET_ += matrix_elements_squared * f_fermi * w_redox * weight
else:
raise ValueError("dim should be 3D or 2D")
# TODO: check THIS below
#k_HET_ *= 2 * np.pi / constants.PLANCK_CONSTANT
if shifted_separately:
return k_HET_, k_HET_shifted
else:
return k_HET_
def plot_2D(self, func, show=True, save=False, filename='fig.png', method='linear', logscale=None):
"""
Function for plotting 2D images
"""
xlen, ylen = np.shape(func)
b1, b2, b3 = self.poscar.structure.lattice
bn1 = b1 / xlen
bn2 = b2 / ylen
R = []
for j in range(ylen):
for i in range(xlen):
R.append(i * bn1 + j * bn2)
X = np.array([x[0] for x in R])
Y = np.array([x[1] for x in R])
Z = func.transpose().flatten()
xi = np.linspace(X.min(), X.max(), 1000)
yi = np.linspace(Y.min(), Y.max(), 1000)
zi = griddata((X, Y), Z, (xi[None, :], yi[:, None]), method=method)
        if logscale is None:
plt.contourf(xi, yi, zi, 500, cmap=plt.cm.rainbow)
else:
zi = np.log10(zi)
            vmin, vmax, step = logscale
            levels = np.arange(vmin, vmax, step)
plt.contourf(xi, yi, zi, 500, cmap=plt.cm.rainbow, levels=levels)
ax = plt.gca()
ax.set_aspect('equal')
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="7%", pad=0.1)
cbar = plt.colorbar(cax=cax)
        if save:
            plt.savefig(filename, dpi=300)
        if show:
            plt.show()
        plt.close()
def _calculate_dE_Q(self):
"""
This function calls GerischerMarcus module with GM class to calculate distribution of redox species
and Fermi-Dirac distribution according to Gerischer-Marcus formalism
:return:
"""
gm = GM(path_to_data=self.path_to_data)
gm.set_params(self.C_EDL, self.T, self.lambda_, self.sheet_area)
return gm.compute_distributions(self.V_std, overpot=self.overpot, add_info=True)[2]
def _get_sheet_area(self):
"""
Inner function to calculate sheet_area (XY plane) in cm^2
"""
b1, b2, b3 = self.poscar.structure.lattice
return np.linalg.norm(np.cross(b1, b2))*1e-16
def generate_acceptor_orbitals(self, orb_type, shape, z_shift=0, x_shift=0, y_shift=0):
"""
        Generates the acceptor orbitals using the tip_types.py module
:return:
"""
bohr_radius = 0.529177 #TODO Check
b1, b2, b3 = self.poscar.structure.lattice
bn1 = b1 / shape[0]
bn2 = b2 / shape[1]
bn3 = b3 / shape[2]
basis = np.array([bn1, bn2, bn3])
transition_matrix = basis.transpose()
number_of_orbitals = len(tip_types.orbitals(orb_type))
acc_orbitals = []
for i in range(number_of_orbitals):
acc_orbitals.append(np.zeros(shape))
for i in range(shape[0]):
for j in range(shape[1]):
for k in range(shape[2]):
if i - x_shift >= shape[0] / 2:
x = i - shape[0] - x_shift
else:
x = i - x_shift
if j - y_shift >= shape[1] / 2:
y = j - shape[1] - y_shift
else:
y = j - y_shift
if k - z_shift >= shape[2] / 2:
z = k - shape[2] - z_shift
else:
z = k - z_shift
r = np.dot(transition_matrix, np.array([x, y, z])) / bohr_radius
for o, orbital in enumerate(acc_orbitals):
orbital[i][j][k] = tip_types.orbitals(orb_type)[o](r)
return acc_orbitals
def _get_overlap_integrals_squared(self, wf, cutoff, acc_orbitals, zmin, zmax):
xlen, ylen, zlen = np.shape(wf)
overlap_integrals_squared = np.zeros((xlen, ylen))
for i in range(xlen):
if i - cutoff < 0:
wf_rolled_x = np.roll(wf, cutoff, axis=0)
orb_rolled_x = []
for orbital in acc_orbitals:
orb_rolled_x.append(np.roll(orbital, i + cutoff, axis=0))
xmin = i
xmax = i + cutoff * 2
elif i - cutoff >= 0 and i + cutoff <= xlen:
wf_rolled_x = np.copy(wf)
orb_rolled_x = []
for orbital in acc_orbitals:
orb_rolled_x.append(np.roll(orbital, i, axis = 0))
xmin = i - cutoff
xmax = i + cutoff
elif i + cutoff > xlen:
wf_rolled_x = np.roll(wf, -cutoff, axis=0)
orb_rolled_x = []
for orbital in acc_orbitals:
orb_rolled_x.append(np.roll(orbital, i - cutoff, axis=0))
xmin = i - cutoff * 2
xmax = i
else:
print(f"ERROR: for i = {i} something with rolling arrays along x goes wrong")
for j in range(ylen):
if j - cutoff < 0:
wf_rolled = np.roll(wf_rolled_x, cutoff, axis=1)
orb_rolled = []
for orbital in orb_rolled_x:
orb_rolled.append(np.roll(orbital, j + cutoff, axis=1))
ymin = j
ymax = j + cutoff * 2
elif j - cutoff >= 0 and j + cutoff <= ylen:
wf_rolled = np.copy(wf_rolled_x)
orb_rolled = []
for orbital in orb_rolled_x:
orb_rolled.append(np.roll(orbital, j, axis=1))
ymin = j - cutoff
ymax = j + cutoff
elif j + cutoff > ylen:
wf_rolled = np.roll(wf_rolled_x, -cutoff, axis=1)
orb_rolled = []
for orbital in orb_rolled_x:
orb_rolled.append(np.roll(orbital, j - cutoff, axis=1))
ymin = j - cutoff * 2
ymax = j
else:
print(f"ERROR: for i = {i} something with rolling arrays along y goes wrong")
integral = []
for orbital in orb_rolled:
integral.append(np.linalg.norm(wf_rolled[xmin:xmax, ymin:ymax, zmin:zmax]*\
orbital[xmin:xmax, ymin:ymax, zmin:zmax]))
overlap_integrals_squared[i][j] = max(integral) ** 2
return overlap_integrals_squared
def save_as_cube(self, array, name, dir):
# TODO: rewrite
import os
if not os.path.exists(dir):
print(f"Directory {dir} does not exist. Creating directory")
os.mkdir(dir)
shape = np.shape(array)
with open(self.working_folder + '/POSCAR') as inf: #TODO get data from poscar class would be better
lines = inf.readlines()
natoms = sum(map(int, lines[6].strip().split()))
atomtypes = lines[5].strip().split()
numbers_of_atoms = list(map(int, lines[6].strip().split()))
type_of_i_atom = []
basis = []
for i in [2, 3, 4]:
vector = list(map(float, lines[i].strip().split()))
basis.append(vector)
basis = np.array(basis)
for i, number in enumerate(numbers_of_atoms):
for j in range(number):
type_of_i_atom.append(atomtypes[i])
with open(dir + '/' + name + '.cube', 'w') as ouf:
ouf.write(' This file is generated using stm.py module\n')
ouf.write(' Good luck\n')
ouf.write(' ' + str(natoms) + '\t0.000\t0.000\t0.000\n')
ouf.write(' ' + str(-shape[0]) + lines[2])
ouf.write(' ' + str(-shape[1]) + lines[3])
ouf.write(' ' + str(-shape[2]) + lines[4])
for i, line in enumerate(lines[8:natoms + 8]):
coordinate = np.array(list(map(float, line.strip().split())))
if lines[7].strip() == 'Direct':
coordinate = coordinate.dot(basis)
coordinate *= constants.BOHR_RADIUS
elif lines[7].strip() == 'Cartesian':
coordinate *= constants.BOHR_RADIUS
else:
print('WARNING!!! Cannot read POSCAR correctly')
atomtype = type_of_i_atom[i]
atomnumber = list(constants.ElemNum2Name.keys())[list(constants.ElemNum2Name.values()).index(atomtype)]
ouf.write(
' ' + str(atomnumber) + '\t0.00000\t' + str(coordinate[0]) + '\t' + str(coordinate[1]) + '\t' + str(
coordinate[2]) + '\n')
counter = 0
for i in range(shape[0]):
for j in range(shape[1]):
for k in range(shape[2]):
ouf.write(str('%.5E' % array[i][j][k]) + ' ')
counter += 1
if counter % 6 == 0:
ouf.write('\n ')
ouf.write('\n ')
print(f"File {name} saved")

View File

@@ -0,0 +1,12 @@
class Ldos():
"""
The aim of this class is to produce LDOS images (2d or 3d) in different energy ranges
"""
# TODO:
# 1) One should get all necessary data from WAVECAR (process WAVECAR with energy step dE and
    #    produce electron densities)
#
#

View File

@@ -0,0 +1,571 @@
from monty.re import regrep
from pymatgen.io.vasp.outputs import Locpot
from pymatgen.io.vasp.outputs import Procar
from electrochemistry.core.vaspwfc_p3 import vaspwfc
import numpy as np
import multiprocessing as mp
import os
import sys
import time
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter1d
class Preprocessing():
"""
This class is for basic preprocessing VASP output files (WAVECAR, OUTCAR, LOCPOT)
    Main goal for this class is to extract the necessary data from VASP files and save it to .npy files:
efermi - Fermi level
eigenvalues - eigenvalues for each kpoint and band
    nbands - number of bands
nkpts - number of kpoints
occupations - occupations for each kpoint and band
weights - weights for each kpoint
WF_ijke - "effective" wavefunction 3D grids for each small energy range from E to E+dE
"""
def __init__(self, working_folder='', print_out=False, dir_to_save=None):
"""
Initialization of variables
"""
self.print_out = print_out
self.working_folder = working_folder
        if dir_to_save is None:
self.dir_to_save = working_folder+'/Saved_data'
else:
self.dir_to_save = dir_to_save
self.efermi = None
self.nkpts = None
self.nbands = None
self.weights = None
self.eigenvalues = None
self.occupations = None
self.vacuum_lvl = None
self.procar_data = None
self.ion_names = None
self.orbital_names = None
self.wavecar_path = working_folder+'/WAVECAR'
self.outcar_path = working_folder+'/OUTCAR'
self.poscar_path = working_folder+'/POSCAR'
self.procar_path = working_folder+'/PROCAR'
self.locpot_path = working_folder+'/LOCPOT'
def get_wavefunction(self, mesh_shape, energies, kb_array, weights, arr_real, arr_imag, arr_cd, done_number, n_grid, wavecar_path=None):
"""
This function is inner function for processes.
It extracts wavefunction from WAVECAR using vaspwfc library (see. https://github.com/QijingZheng/VaspBandUnfolding)
        :param mesh_shape: shape of the wavefunction mesh (tuple of 3 int)
        :param energies: list of energy-bin indices for the current process to handle (list of int)
        :param kb_array: array of k-points and bands that lie in each energy range (list of [kpoint, band] pairs per energy bin)
        :param weights: np.array of float containing the weights of the different k-points
        :param arr_real: shared-memory float array to put the real part of the wavefunction
        :param arr_imag: shared-memory float array to put the imaginary part of the wavefunction
        :param arr_cd: shared-memory float array to put the charge density
        :param done_number: shared-memory int to track the number of finished energy ranges
        :param wavecar_path: path to the WAVECAR file
        :param n_grid: float multiplier to increase the mesh density (1.0 - standard grid, higher values e.g. 1.5, 2.0 increase the grid quality)
:return:
"""
        if wavecar_path is None:
wavecar_path = self.wavecar_path
t_p = time.time()
wavecar = vaspwfc(wavecar_path)
#print("Reading WAVECAR takes", time.time()-t_p, " sec")
#sys.stdout.flush()
for e in energies:
#t = time.time()
n = mesh_shape[0]*mesh_shape[1]*mesh_shape[2]
start = e * n
kb_array_for_e = kb_array[e]
WF_for_current_Energy_real = np.zeros(mesh_shape)
WF_for_current_Energy_imag = np.zeros(mesh_shape)
CD_for_current_Energy = np.zeros(mesh_shape)
for kb in kb_array_for_e:
kpoint = kb[0]
band = kb[1]
wf = wavecar.wfc_r(ikpt=kpoint, iband=band, ngrid=wavecar._ngrid * n_grid)
phi_real = np.real(wf)
phi_imag = np.imag(wf)
phi_squared = np.abs(wf)**2
#print ("Wavefunction done from process:", e, "; kpoint = ", kpoint, "; band = ", band)
#sys.stdout.flush()
WF_for_current_Energy_real += phi_real * np.sqrt(weights[kpoint - 1])
WF_for_current_Energy_imag += phi_imag * np.sqrt(weights[kpoint - 1])
CD_for_current_Energy += phi_squared * weights[kpoint - 1]
WF_for_current_Energy_1D_real = np.reshape(WF_for_current_Energy_real, n)
WF_for_current_Energy_1D_imag = np.reshape(WF_for_current_Energy_imag, n)
CD_for_current_Energy_1D = np.reshape(CD_for_current_Energy, n)
for ii in range(n):
arr_real[ii + start] = float(WF_for_current_Energy_1D_real[ii])
arr_imag[ii + start] = float(WF_for_current_Energy_1D_imag[ii])
arr_cd[ii + start] = float(CD_for_current_Energy_1D[ii])
#print ("Energy ", e, " finished, it takes: ", time.time()-t, " sec")
#sys.stdout.flush()
done_number.value += 1
if self.print_out == True:
print(done_number.value, " energies are done")
sys.stdout.flush()
def process_WAVECAR(self, Erange_from_fermi, dE, wavecar_path=None, dir_to_save=None, n_grid=1.0):
"""
This function is based on vaspwfc class https://github.com/QijingZheng/VaspBandUnfolding
        This function processes the WAVECAR file obtained with VASP. It saves the 4D np.array WF_ijke.npy,
        which contains spatially resolved complex-valued effective wavefunctions for each energy range [E, E+dE].
        'Effective' means that we sum wavefunctions with close energies.
        UPDATE: The function also saves the dictionary WF_data.npy, which contains two keys: 'energy_range', 'dE'
        :param Erange_from_fermi: Energy range for which we extract the wavefunction (tuple or list of float)
        :param dE: Energy step (float)
        :param wavecar_path: path to the WAVECAR file (string)
        :param dir_to_save: directory to save the WF_ijke.npy file (string)
        :param n_grid: float multiplier to increase the mesh density (1.0 - standard grid, higher values e.g. 1.5, 2.0 increase the grid quality)
:return: nothing
"""
        if wavecar_path is None:
            wavecar_path = self.wavecar_path
        if dir_to_save is None:
            dir_to_save = self.dir_to_save
for var in ['efermi', 'nkpts', 'nbands', 'eigenvalues', 'weights']:
self.check_existance(var)
Erange = [round(Erange_from_fermi[0] + self.efermi, 5), round(Erange_from_fermi[1] + self.efermi, 5)]
energies_number = int((Erange[1] - Erange[0]) / dE)
kb_array = [[] for i in range(energies_number)]
wavecar = vaspwfc(wavecar_path)
wf = wavecar.wfc_r(ikpt=1, iband=1, ngrid=wavecar._ngrid * n_grid)
mesh_shape = np.shape(wf)
n = mesh_shape[0]*mesh_shape[1]*mesh_shape[2]
for band in range(1, self.nbands+1):
for kpoint in range(1, self.nkpts+1):
energy = self.eigenvalues[kpoint - 1][band - 1]
if energy >= Erange[0] and energy < Erange[1]:
e = int((energy - Erange[0]) / dE)
kb_array[e].append([kpoint, band])
t_arr = time.time()
arr_real = mp.Array('f', [0.0] * n * energies_number)
arr_imag = mp.Array('f', [0.0] * n * energies_number)
arr_cd = mp.Array('f', [0.0] * n * energies_number)
done_number = mp.Value('i', 0)
if self.print_out == True:
print("Array Created, it takes: ", time.time() - t_arr, " sec")
processes = []
if self.print_out == True:
print("Total Number Of Energies = ", energies_number)
numCPU = mp.cpu_count()
Energies_per_CPU = energies_number // numCPU + 1
for i in range(numCPU):
if (i + 1) * Energies_per_CPU > energies_number:
energies = np.arange(i * Energies_per_CPU, energies_number)
else:
energies = np.arange(i * Energies_per_CPU, (i + 1) * Energies_per_CPU)
p = mp.Process(target=self.get_wavefunction, args=(mesh_shape, energies, kb_array, self.weights, arr_real, arr_imag, arr_cd, done_number, n_grid, wavecar_path))
processes.append(p)
if self.print_out == True:
print(numCPU, " CPU Available")
        for step in range(len(processes) // numCPU + 1):
            # launch at most numCPU processes per batch; clamp the slice so the last
            # (possibly partial) batch does not run past the end of the process list
            for i in range(step * numCPU, min((step + 1) * numCPU, len(processes))):
                processes[i].start()
            for i in range(step * numCPU, min((step + 1) * numCPU, len(processes))):
                processes[i].join()
if self.print_out == True:
print("All energies DONE! Start wavefunction reshaping")
        arr = np.zeros(n * energies_number, dtype=np.complex128)
arr.real = arr_real
arr.imag = arr_imag
wave_functions = np.reshape(arr, (energies_number,) + mesh_shape)
wave_functions = np.moveaxis(wave_functions, 0, -1)
charge_densities = np.reshape(arr_cd, (energies_number,) + mesh_shape)
charge_densities = np.moveaxis(charge_densities, 0, -1)
WF_data = {'energy_range':Erange_from_fermi, 'dE':dE}
np.save(dir_to_save+'/WF_ijke', wave_functions)
np.save(dir_to_save+'/CD_ijke', charge_densities)
if self.print_out == True:
print(dir_to_save+'/WF_ijke.npy Saved')
print(dir_to_save+'/CD_ijke.npy Saved')
np.save(dir_to_save+'/WF_data', WF_data)
if self.print_out == True:
print(dir_to_save+'/WF_data.npy Saved')
def process_OUTCAR(self, outcar_path=None, dir_to_save=None, optimization=False):
"""
Processes an OUTCAR file obtained from VASP and extracts the following variables:
self.nkpts - number of k-points (int)
self.efermi - Fermi level (float), or a list of Fermi levels per ionic step if optimization=True
self.nbands - number of bands (int)
self.eigenvalues - 2D np.array, eigenvalues[i][j] contains the energy for k-point i and band j
self.occupations - 2D np.array, occupations[i][j] contains the occupation for k-point i and band j
:param outcar_path: path to the OUTCAR file
:param dir_to_save: directory to save the extracted data
:param optimization: if True, eigenvalues and occupations are stored per ionic step
:return: nothing
"""
if outcar_path == None:
outcar_path = self.outcar_path
if dir_to_save == None:
dir_to_save = self.dir_to_save
patterns = {'nkpts': r'Found\s+(\d+)\s+irreducible\sk-points',
'weights': 'Following reciprocal coordinates:',
'efermi': r'E-fermi\s:\s+([-.\d]+)',
'kpoints': r'k-point\s+(\d+)\s:\s+[-.\d]+\s+[-.\d]+\s+[-.\d]+\n'}
matches = regrep(outcar_path, patterns)
self.nkpts = int(matches['nkpts'][0][0][0])
if optimization:
self.efermi = []
efermi_data = np.array(matches['efermi'])[...,0]
number_of_ionic_steps = len(efermi_data)
for i in range(number_of_ionic_steps):
self.efermi.append(float(efermi_data[i][0]))
else:
self.efermi = float(matches['efermi'][0][0][0])
self.nbands = int(matches['kpoints'][1][1] - matches['kpoints'][0][1]-3)
self.eigenvalues = []
self.occupations = []
self.weights = []
with open(outcar_path) as file:
lines = file.readlines()
for i in range(self.nkpts):
self.weights.append(float(lines[matches['weights'][0][1]+i+2].split()[3]))
if optimization:
for step in range(number_of_ionic_steps):
self.eigenvalues.append([])
self.occupations.append([])
for kpoint in range(self.nkpts):
self.eigenvalues[step].append([])
self.occupations[step].append([])
startline = matches['kpoints'][kpoint+(step*self.nkpts)][1] + 2
for i in range(startline, startline + self.nbands):
self.eigenvalues[step][kpoint].append(float(lines[i].split()[1]))
self.occupations[step][kpoint].append(float(lines[i].split()[2]))
else:
for kpoint in range(self.nkpts):
self.eigenvalues.append([])
self.occupations.append([])
startline = matches['kpoints'][kpoint][1]+2
for i in range(startline, startline + self.nbands):
self.eigenvalues[kpoint].append(float(lines[i].split()[1]))
self.occupations[kpoint].append(float(lines[i].split()[2]))
self.eigenvalues = np.array(self.eigenvalues)
self.occupations = np.array(self.occupations)
self.weights = np.array(self.weights)
self.weights /= np.sum(self.weights)
for var in ['efermi', 'nkpts', 'nbands', 'weights', 'eigenvalues', 'occupations']:
self.save(var, dir_to_save)
def process_PROCAR(self, procar_path=None, poscar_path=None, dir_to_save=None):
"""
This function processes a PROCAR file obtained from VASP using the pymatgen.io.vasp.outputs.Procar class
and saves ion_names, orbital_names and the data array
The data array contains projections in the following form: data[kpoint][band][ion_number][orbital_number]
All numberings start from 0
:param procar_path: path to the PROCAR file
:param poscar_path: path to the POSCAR file
:param dir_to_save: directory to save the data
:return: nothing
"""
if procar_path == None:
procar_path = self.procar_path
if poscar_path == None:
poscar_path = self.poscar_path
if dir_to_save == None:
dir_to_save = self.dir_to_save
procar = Procar(procar_path)
# procar.data is keyed by spin channel; for spin-polarized runs only the last channel is kept here
for key in procar.data.keys():
self.procar_data = procar.data[key]
self.orbital_names = procar.orbitals
self.ion_names = self._get_atom_types(poscar_path=poscar_path)
for var in ['procar_data', 'orbital_names', 'ion_names']:
self.save(var, dir_to_save)
def _get_atom_types(self, poscar_path=None):
"""
Inner function to obtain the list of atom types from a POSCAR file
:param poscar_path: path to the POSCAR file
:return: atom_types, a 1D list with the atom type of each ion (indexed from 0)
"""
if poscar_path == None:
poscar_path = self.poscar_path
with open(poscar_path) as inf:
lines = inf.readlines()
ion_types = lines[5].strip().split()
nions = map(int, lines[6].strip().split())
atom_types=[]
for i, number in enumerate(nions):
for j in range(number):
atom_types.append(ion_types[i])
return atom_types
def process_LOCPOT(self, locpot_path=None, dir_to_save=None):
"""
This function processes a LOCPOT file obtained from VASP and stores the vacuum level
(the maximum of the planar-averaged potential along the z axis)
:param locpot_path: path to the LOCPOT file
:param dir_to_save: directory to save vacuum_lvl
:return: nothing
"""
if locpot_path == None:
locpot_path = self.locpot_path
if dir_to_save == None:
dir_to_save = self.dir_to_save
locpot = Locpot.from_file(locpot_path)
avr = locpot.get_average_along_axis(2)
self.vacuum_lvl = np.max(avr)
self.save('vacuum_lvl', dir_to_save)
def save(self, variable='all', dir=None):
"""
This function saves variables in .npy files
:param variable: desired variable
:param dir: directory name to save file
:return: nothing
"""
if dir == None:
dir = self.dir_to_save
if variable == 'all':
for var in ['efermi', 'nkpts', 'nbands', 'weights', 'eigenvalues', 'occupations', 'vacuum_lvl']:
self.save(var)
else:
if not os.path.exists(dir):
if self.print_out == True:
print('Directory ', dir, ' does not exist. Creating directory')
os.mkdir(dir)
np.save(dir+'/'+variable+'.npy', getattr(self, variable))
if self.print_out == True:
print('Variable ', variable, ' saved to directory ', dir)
def load(self, variable='all', dir=None):
"""
This function loads variables from .npy files to class variables
:param variable: desired variable
:param dir: directory from which load files
:return: nothing
"""
if dir == None:
dir = self.dir_to_save
if variable == 'all':
for var in ['efermi', 'nkpts', 'nbands', 'weights', 'eigenvalues', 'occupations', 'vacuum_lvl']:
self.load(var)
else:
setattr(self, variable, np.load(dir+'/'+str(variable)+'.npy'))
if self.print_out == True:
print('Variable ', variable, ' loaded')
def check_existance(self, variable='all', dir=None):
"""
This function checks whether the desired variable is set (not None) and, if necessary,
loads it from a file or processes the corresponding VASP output
:param variable: desired variable
:param dir: directory in which to look for saved .npy data
:return: nothing
"""
if dir == None:
dir = self.dir_to_save
if variable == 'all':
for var in ['efermi', 'nkpts', 'nbands', 'weights', 'eigenvalues', 'occupations', 'vacuum_lvl']:
self.check_existance(var)
else:
if getattr(self, variable) is None:
try:
if self.print_out == True:
print('Try load variable ',variable, ' from dir ', dir)
self.load(variable, dir)
except:
if variable == 'vacuum_lvl':
if self.print_out == True:
print('Loading ', variable, ' failed! Start processing LOCPOT')
self.process_LOCPOT()
elif variable == 'procar_data' or variable == 'orbital_names' \
or variable == 'ion_names':
if self.print_out == True:
print('Loading ', variable, ' failed! Start processing PROCAR and POSCAR')
self.process_PROCAR()
else:
if self.print_out == True:
print('Loading ', variable, ' failed! Start processing OUTCAR')
self.process_OUTCAR()
else:
if self.print_out == True:
print('Variable ', variable, 'exists')
def get_band_eigs(self, band, outcar_path='OUTCAR'):
"""
Returns the eigenvalues for the desired band (one value per k-point)
:param band: desired band
:param outcar_path: path to the OUTCAR file
:return: 1D np.array of eigenvalues over k-points
"""
if self.eigenvalues is None:
try:
if self.print_out == True:
print("Variable is not define. Try load from file")
self.load('eigenvalues')
except:
if self.print_out == True:
print("Loading failed. Start processing OUTCAR")
self.process_OUTCAR(outcar_path)
return self.eigenvalues[:,band]
else:
return self.eigenvalues[:,band]
def get_band_occ(self, band, outcar_path='OUTCAR'):
"""
Returns the occupations for the desired band (one value per k-point)
:param band: desired band
:param outcar_path: path to the OUTCAR file
:return: 1D np.array of occupations over k-points
"""
if self.occupations is None:
try:
if self.print_out == True:
print("Variable is not define. Try load from file")
self.load('occupations')
except:
if self.print_out == True:
print("Loading failed. Start processing OUTCAR")
self.process_OUTCAR(outcar_path)
return self.occupations[:, band]
else:
return self.occupations[:, band]
def get_DOS(self, Erange_from_fermi, dE, optimization=False):
"""
This function calculates the Density of States for the desired energy range and energy step
:param Erange_from_fermi: energy range relative to the Fermi level
:param dE: energy step
:param optimization: if True, a DOS is returned for every ionic step
:return:
E_arr: float64 np.array with energies relative to the Fermi level
DOS_arr: float64 np.array with the Density of States
dE_new: recalculated energy step that ensures an integer number of bins (non-optimization branch only)
"""
for var in ['efermi', 'nkpts', 'nbands', 'eigenvalues', 'weights']:
self.check_existance(var)
if optimization:
DOS_arr = []
E_arr = []
for step in range(len(self.efermi)):
Erange = [Erange_from_fermi[0] + self.efermi[step], Erange_from_fermi[1] + self.efermi[step]]
energies_number = int((Erange[1] - Erange[0]) / dE)
dE_new = (Erange[1] - Erange[0]) / energies_number
DOS_arr.append(np.zeros(energies_number))
E_arr.append(np.arange(Erange_from_fermi[0], Erange_from_fermi[1], dE_new))
for k in range(self.nkpts):
for b in range(self.nbands):
energy = self.eigenvalues[step][k][b]
if energy > Erange[0] and energy < Erange[1]:
e = int((energy - Erange[0]) / dE_new)
DOS_arr[step][e] += self.weights[k] / dE_new
return E_arr, [dos * 2 for dos in DOS_arr]  # DOS_arr is a Python list here; scale each ionic step element-wise (factor 2 as in the branch below)
else:
Erange = [Erange_from_fermi[0] + self.efermi, Erange_from_fermi[1] + self.efermi]
energies_number = int((Erange[1] - Erange[0]) / dE)
dE_new = (Erange[1] - Erange[0])/energies_number
DOS_arr = np.zeros(energies_number)
E_arr = np.arange(Erange_from_fermi[0], Erange_from_fermi[1], dE_new)
for k in range(self.nkpts):
for b in range(self.nbands):
energy = self.eigenvalues[k][b]
if energy > Erange[0] and energy < Erange[1]:
e = int((energy - Erange[0]) / dE_new)
DOS_arr[e]+=self.weights[k] / dE_new
return E_arr, DOS_arr * 2, dE_new
def get_pdos(self, Erange_from_fermi, dE, ions='all', orbitals='all', dir_to_data=None):
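"""
Calculates a projected DOS by summing the PROCAR projections over a chosen set of ions and orbitals
:param Erange_from_fermi: energy range relative to the Fermi level (tuple or list of float)
:param dE: energy step (float)
:param ions: 'all', a species name (e.g. 'C'), a list of ion indices, or a list of species names
:param orbitals: 'all', an orbital name (e.g. 's'), a list of orbital indices, or a list of orbital names
:param dir_to_data: directory with the saved .npy data
:return: E_arr, PDOS_arr * 2, dE_new (same conventions as get_DOS)
"""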
if dir_to_data == None:
dir_to_data = self.dir_to_save
for var in ['procar_data' , 'orbital_names', 'ion_names', 'efermi', 'eigenvalues', 'weights']:
self.check_existance(var, dir_to_data)
Erange = [Erange_from_fermi[0] + self.efermi, Erange_from_fermi[1] + self.efermi]
energies_number = int((Erange[1] - Erange[0]) / dE)
dE_new = (Erange[1] - Erange[0]) / energies_number
DOS_arr = np.zeros(energies_number)
E_arr = np.arange(Erange_from_fermi[0], Erange_from_fermi[1], dE_new)
nkpts = np.shape(self.procar_data)[0]
nbands = np.shape(self.procar_data)[1]
for k in range(nkpts):
if self.print_out == True:
print('kpoint = ', k)
for b in range(nbands):
energy = self.eigenvalues[k][b]
if energy > Erange[0] and energy < Erange[1]:
e = int((energy - Erange[0]) / dE_new)
if ions == 'all':
list_of_ions = [i for i in range(len(self.ion_names))]
elif type(ions) == str:
list_of_ions = []
for i, name in enumerate(self.ion_names):
if name == ions:
list_of_ions.append(i)
elif type(ions) == list:
if type(ions[0]) == int:
list_of_ions = ions
elif type(ions[0]) == str:
list_of_ions = []
for ion_name in ions:
for i, name in enumerate(self.ion_names):
if name == ion_name:
list_of_ions.append(i)
if orbitals == 'all':
list_of_orbitals = [i for i in range(len(self.orbital_names))]
elif type(orbitals) == str:
list_of_orbitals = []
for i, name in enumerate(self.orbital_names):
if name == orbitals:
list_of_orbitals.append(i)
elif type(orbitals) == list:
if type(orbitals[0]) == int:
list_of_orbitals = orbitals
elif type(orbitals[0]) == str:
list_of_orbitals = []
for orb_name in orbitals:
for i, name in enumerate(self.orbital_names):
if name == orb_name:
list_of_orbitals.append(i)
weight = 0
for ion in list_of_ions:
for orb in list_of_orbitals:
weight+=self.procar_data[k][b][ion][orb]
DOS_arr[e] += weight / dE_new * self.weights[k]
return E_arr, DOS_arr * 2, dE_new
if __name__=='__main__':
t=time.time()
"""
outcar_path = 'OUTCAR'
wavecar_path = 'WAVECAR'
dir_to_save = "Saved_data"
p = Preprocessing(dir_to_save)
p.process_OUTCAR(outcar_path=outcar_path, dir_to_save=dir_to_save)
print('Processing OUTCAR takes: ', time.time()-t, 'sec')
t=time.time()
p.process_WAVECAR((-7.0, 4.0), 0.01, wavecar_path=wavecar_path, dir_to_save=dir_to_save)
print('Job done! Processing WAVECAR: ', time.time()-t, ' sec')
"""
dir_to_save = 'Saved_data'
p = Preprocessing(dir_to_save)
Erange = [-25.0, 5.0]
dE = 0.1
E_arr, DOS_arr, dE_new = p.get_DOS(Erange, dE)
plt.plot(E_arr, gaussian_filter1d(DOS_arr, sigma=2), label='total')
E_arr, PDOS_arr, dE_new = p.get_pdos(Erange, dE, ions='C', orbitals='s')
plt.plot(E_arr, gaussian_filter1d(PDOS_arr, sigma=2), label='C-s')
E_arr, PDOS_arr, dE_new = p.get_pdos(Erange, dE, ions='C', orbitals=['px', 'py', 'pz'])
plt.plot(E_arr, gaussian_filter1d(PDOS_arr, sigma=2), label='C-p')
E_arr, PDOS_arr, dE_new = p.get_pdos(Erange, dE, ions='H', orbitals='s')
plt.plot(E_arr, gaussian_filter1d(PDOS_arr, sigma=2), label='H-s')
E_arr, PDOS_arr, dE_new = p.get_pdos(Erange, dE, ions='O', orbitals='s')
plt.plot(E_arr, gaussian_filter1d(PDOS_arr, sigma=2), label='O-s')
E_arr, PDOS_arr, dE_new = p.get_pdos(Erange, dE, ions='O', orbitals=['px', 'py', 'pz'])
plt.plot(E_arr, gaussian_filter1d(PDOS_arr, sigma=2), label='O-p')
plt.xlabel('E, eV')
plt.ylabel('DOS, states/eV/cell')
plt.legend()
plt.savefig('pdos.png', dpi=300)
plt.show()
plt.close()


@@ -0,0 +1,811 @@
import numpy as np
import os
import time
import math
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.ndimage.filters import gaussian_filter1d
import tip_types
from echem.core import ElemNum2Name
plt.switch_backend('agg')
class STM:
"""
This class computes 2D and 3D STM images at different theory levels:
Tersoff-Hamann, Chen, and analytical acceptor wavefunctions (oxygen)
It can also calculate 2D ECSTM images using the GerischerMarkus module
In the current version of the program this class is largely superseded: most of its functions have been relocated to kHET_spatial.py
"""
PLANCK_CONSTANT = 4.135667662e-15 # Planck's constant in eV*s
BOLTZMANN_CONSTANT = 8.617333262145e-5 # Boltzmann's constant in eV/K
ELEM_CHARGE = 1.60217662e-19 # Elementary charge in Coulombs
BOHR_RADIUS = 1.88973 # Bohr per Angstrom (Angstrom -> Bohr conversion factor)
AVAILABLE_TIPS_TYPES = ['oxygen', 'IrCl6', 'RuNH3_6', 'RuNH3_6_NNN_plane', 'RuNH3_6_perpendicular', 'oxygen_parallel_x', 'oxygen_parallel_y']
def __init__(self, working_folder):
self.WF_ijke = None
self.CD_ijke = None
self.dE = None
self.energy_range = None
self.working_folder = working_folder
self.path_to_data = self.working_folder+'/Saved_data'
self.WF_data_path = self.path_to_data+'/WF_data.npy'
self.WF_ijke_path = self.path_to_data+'/WF_ijke.npy'
self.CD_ijke_path = self.path_to_data+'/CD_ijke.npy'
self.poscar_path = working_folder+'/POSCAR'
def save_as_vasp(self, array, name, dir):
if not os.path.exists(dir):
print(f"Directory {dir} does not exist. Creating directory")
os.mkdir(dir)
shape = np.shape(array)
with open(self.poscar_path) as inf:
lines = inf.readlines()
natoms = sum(map(int, lines[6].strip().split()))  # total number of atoms (sum of the per-species counts, as in save_as_cube)
with open(dir+'/'+ name + '.vasp', 'w') as ouf:
ouf.writelines(lines[:natoms+8])
ouf.write('\n ' + str(shape[0]) + ' ' + str(shape[1]) + ' ' + str(shape[2]) + '\n ')
counter = 0
for k in range(shape[2]):
for j in range(shape[1]):
for i in range(shape[0]):
ouf.write(str('%.8E' % array[i][j][k]) + ' ')
counter += 1
if counter % 10 == 0:
ouf.write('\n ')
print(f"File {name} saved")
def save_as_cube(self, array, name, dir):
if not os.path.exists(dir):
print(f"Directory {dir} does not exist. Creating directory")
os.mkdir(dir)
shape = np.shape(array)
with open(self.poscar_path) as inf:
lines = inf.readlines()
natoms = sum(map(int, lines[6].strip().split()))
atomtypes = lines[5].strip().split()
numbers_of_atoms = list(map(int, lines[6].strip().split()))
type_of_i_atom = []
basis = []
for i in [2,3,4]:
vector = list(map(float, lines[i].strip().split()))
basis.append(vector)
basis = np.array(basis)
for i, number in enumerate(numbers_of_atoms):
for j in range(number):
type_of_i_atom.append(atomtypes[i])
with open(dir+'/'+ name + '.cube', 'w') as ouf:
ouf.write(' This file is generated using stm.py module\n')
ouf.write(' Good luck\n')
ouf.write(' ' + str(natoms) + '\t0.000\t0.000\t0.000\n')
ouf.write(' ' + str(-shape[0]) + lines[2])
ouf.write(' ' + str(-shape[1]) + lines[3])
ouf.write(' ' + str(-shape[2]) + lines[4])
for i, line in enumerate(lines[8:natoms+8]):
coordinate = np.array(list(map(float, line.strip().split())))
if lines[7].strip() == 'Direct':
coordinate = coordinate.dot(basis)
coordinate *= self.BOHR_RADIUS
elif lines[7].strip() == 'Cartesian':
coordinate *= self.BOHR_RADIUS
else:
print ('WARNING!!! Cannot read POSCAR correctly')
atomtype = type_of_i_atom[i]
atomnumber = list(ElemNum2Name.keys())[list(ElemNum2Name.values()).index(atomtype)]
ouf.write(' ' + str(atomnumber) + '\t0.00000\t' + str(coordinate[0])+ '\t' + str(coordinate[1])+ '\t' + str(coordinate[2])+'\n')
counter = 0
for i in range(shape[0]):
for j in range(shape[1]):
for k in range(shape[2]):
ouf.write(str('%.5E' % array[i][j][k]) + ' ')
counter += 1
if counter % 6 == 0:
ouf.write('\n ')
ouf.write('\n ')
print(f"File {name} saved")
def load_data(self):
WF_data = np.load(self.WF_data_path, allow_pickle=True).item()
self.dE = WF_data['dE']
self.energy_range = WF_data['energy_range']
self.WF_ijke = np.load(self.WF_ijke_path)
self.CD_ijke = np.load(self.CD_ijke_path)
def set_ecstm_parameters(self, C_EDL, T, lambda_, V_std, overpot,
effective_freq, linear_constant, threshold_value):
"""
:param C_EDL: float
Capacitance of the electric double layer (microF/cm^2)
:param T: int, float
Temperature. It is used when computing the Fermi function and the distribution function of redox-system states
:param lambda_: float
Reorganization energy in eV
:param V_std: float
Standard potential of the redox couple (Volts)
:param overpot: float
Overpotential (Volts). It shifts the electrode Fermi energy to -|e|*overpot
:param effective_freq: float
Effective frequency of the redox-species motion
:param linear_constant: float
Linear constant of proportionality between Hif and Sif: Hif = linear_constant * Sif
:param threshold_value: float
Minimum value of y_redox and y_fermi to be considered in the integral
:return:
"""
self.C_EDL = C_EDL
self.T = T
self.lambda_ = lambda_
self.sheet_area = self._get_sheet_area() # Area of investigated surface(XY) in cm^2
self.V_std = V_std
self.overpot = overpot
self.effective_freq = effective_freq
self.linear_constant = linear_constant
self.threshold_value = threshold_value
def load_data_for_ecstm(self):
"""
This inner function loads the data needed for ECSTM calculations (DOS, Fermi level and vacuum level).
If the saved .npy files are missing, it recomputes them from the OUTCAR and LOCPOT files in the working folder.
:return: nothing
"""
try:
self.E = np.load(self.path_to_data+'/E.npy')
self.DOS = np.load(self.path_to_data+'/DOS.npy')
except:
import preprocessing_v2 as preprocessing
p = preprocessing.Preprocessing(working_folder=self.working_folder)
p.process_OUTCAR()
self.E, self.DOS, dE_new = p.get_DOS(self.energy_range, self.dE)
if dE_new != self.dE:
print("WARNING! Something wrong with dE during DOS calculations")
np.save(self.path_to_data+'/DOS.npy', self.DOS)
np.save(self.path_to_data+'/E.npy', self.E)
try:
self.efermi = np.load(self.path_to_data+'/efermi.npy')
except:
print(f"ERROR! {self.path_to_data}/efermi.npy does not exist. Try to preprocess data")
try:
self.vacuum_lvl = np.load(self.path_to_data+'/vacuum_lvl.npy')
except:
from pymatgen.io.vasp.outputs import Locpot
locpot = Locpot.from_file(self.working_folder+'/LOCPOT')
avr = locpot.get_average_along_axis(2)
self.vacuum_lvl = np.max(avr)
np.save(self.path_to_data+'/vacuum_lvl.npy', self.vacuum_lvl)
def _calculate_distributions(self):
"""
This function calls the GerischerMarkus module (GM class) to calculate the distribution of redox-species states
and the Fermi-Dirac distribution according to the Gerischer-Marcus formalism
:return: the GM object (y_fermi and y_redox are also stored on self)
"""
import GerischerMarkus as gm
gerisher_marcus_obj = gm.GM(path_to_data = self.path_to_data)
gerisher_marcus_obj.set_params(self.C_EDL, self.T, self.lambda_, self.sheet_area)
self.y_fermi, self.y_redox = gerisher_marcus_obj.compute_distributions(self.V_std, overpot=self.overpot)
return gerisher_marcus_obj
@staticmethod
def _nearest_array_indices(array, value):
i = 0
while value > array[i]:
i += 1
return i - 1, i
def plot_distributions(self, E_range=[-7, 4], dE=None, sigma=2, fill_area_lower_Fermi_lvl=True,
plot_Fermi_Dirac_distib=True, plot_redox_distrib=True):
a = self._calculate_distributions()
if dE == None:
dE = self.dE
if dE != self.dE or E_range[0] < self.energy_range[0] or E_range[1] > self.energy_range[1]:
import preprocessing_v2 as prep
p = prep.Preprocessing(working_folder = self.working_folder)
E, DOS, dE_new = p.get_DOS(E_range, dE)
else:
E, DOS = a.E, a.DOS
n_1, n_2 = self._nearest_array_indices(E, a.dE_Q_eq)
if sigma > 0 and sigma != None:
DOS = gaussian_filter1d(DOS, sigma)
plt.plot(E, DOS)
if fill_area_lower_Fermi_lvl == True:
plt.fill_between(E[:n_2], DOS[:n_2])
if plot_Fermi_Dirac_distib == True:
plt.plot(a.E, self.y_fermi * 30)
if plot_redox_distrib == True:
plt.plot(a.E, self.y_redox * 10)
plt.xlabel('E, eV')
plt.ylabel('DOS, states/eV/cell')
plt.xlim(E_range)
plt.savefig(f"distributions_{self.V_std}_{self.overpot}.png", dpi=300)
plt.close()
def _kT_in_eV(self, T):
return T * self.BOLTZMANN_CONSTANT
def _get_kappa(self, matrix_elements_squared):
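"""
Returns an electronic transmission coefficient of Landau-Zener form:
kappa = 1 - exp(-2*pi*lz_factor), where lz_factor is built from the squared electronic
coupling, the effective frequency of the redox species and sqrt(pi / (lambda_ * kT)).
"""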
lz_factor = 2 * matrix_elements_squared / self.PLANCK_CONSTANT / self.effective_freq * math.sqrt(
math.pi / self.lambda_ / self._kT_in_eV(self.T))
kappa = 1 - np.exp(-2 * math.pi * lz_factor)
return kappa
def _get_basis_vectors(self):
"""
This function processes the POSCAR file to extract the lattice vectors of the simulation box
:return: b1, b2, b3 as 1D np.arrays
"""
with open(self.poscar_path) as inf:
lines = inf.readlines()
b1 = np.array(list(map(float, lines[2].strip().split())))
b2 = np.array(list(map(float, lines[3].strip().split())))
b3 = np.array(list(map(float, lines[4].strip().split())))
return b1, b2, b3
def _get_sheet_area(self):
"""
Inner function to calculate the sheet area (XY plane, |b1 x b2|) in cm^2
"""
b1, b2, b3 = self._get_basis_vectors()
return np.linalg.norm(np.cross(b1,b2))*1e-16
def _plot_contour_map(self, function, X, Y, dir, filename):
"""
Function for plotting 2D images
:param function: Z values
:param X: X values
:param Y: Y values
:param dir: directory in which contour map will be saved
:param filename: filename of image
:return:
"""
if not os.path.exists(dir):
print(f"Directory {dir} does not exist. Creating directory")
os.mkdir(dir)
t = time.time()
Z = function.transpose().flatten()
xi = np.linspace(X.min(), X.max(), 1000)
yi = np.linspace(Y.min(), Y.max(), 1000)
zi = griddata((X, Y), Z, (xi[None, :], yi[:, None]), method='cubic')
plt.contourf(xi, yi, zi, 500, cmap=plt.cm.rainbow)
ax = plt.gca()
ax.set_aspect('equal')
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="7%", pad=0.1)
plt.colorbar(cax=cax)
plt.savefig(dir+'/'+filename+'.png', dpi=300)
plt.close()
print (f"Plotting map takes {time.time()-t} sec")
def _get_overlap_integrals_squared(self, e, cutoff, acc_orbitals, zmin, zmax):
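"""
For a single energy bin e, computes the squared overlap between the effective surface
wavefunction WF_ijke[:, :, :, e] and the acceptor orbitals for every lateral tip position (i, j).
np.roll keeps the integration window of +/- cutoff grid points inside the array under periodic
boundary conditions; when several acceptor orbitals are supplied, the largest overlap is squared and kept.
:return: 2D np.array of shape (xlen, ylen)
"""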
WF_ijk = self.WF_ijke[:, :, :, e]
xlen, ylen, zlen = np.shape(WF_ijk)
overlap_integrals_squared = np.zeros((xlen, ylen))
if np.allclose(WF_ijk, np.zeros((xlen, ylen, zlen))):
# If WF_ijk array for current energy is empty, code goes to the next energy
print(f"WF_ijk array for energy {e} is empty. Going to the next")
return overlap_integrals_squared
for i in range(xlen):
if i - cutoff < 0:
WF_ijk_rolled_x = np.roll(WF_ijk, cutoff, axis=0)
orb_rolled_x = []
for orbital in acc_orbitals:
orb_rolled_x.append(np.roll(orbital, i + cutoff, axis=0))
xmin = i
xmax = i + cutoff * 2
elif i - cutoff >= 0 and i + cutoff <= xlen:
WF_ijk_rolled_x = np.copy(WF_ijk)
orb_rolled_x = []
for orbital in acc_orbitals:
orb_rolled_x.append(np.roll(orbital, i, axis = 0))
xmin = i - cutoff
xmax = i + cutoff
elif i + cutoff > xlen:
WF_ijk_rolled_x = np.roll(WF_ijk, -cutoff, axis=0)
orb_rolled_x = []
for orbital in acc_orbitals:
orb_rolled_x.append(np.roll(orbital, i - cutoff, axis=0))
xmin = i - cutoff * 2
xmax = i
else:
print(f"ERROR: for i = {i} something with rolling arrays along x goes wrong")
for j in range(ylen):
if j - cutoff < 0:
WF_ijk_rolled = np.roll(WF_ijk_rolled_x, cutoff, axis=1)
orb_rolled = []
for orbital in orb_rolled_x:
orb_rolled.append(np.roll(orbital, j + cutoff, axis=1))
ymin = j
ymax = j + cutoff * 2
elif j - cutoff >= 0 and j + cutoff <= ylen:
WF_ijk_rolled = np.copy(WF_ijk_rolled_x)
orb_rolled = []
for orbital in orb_rolled_x:
orb_rolled.append(np.roll(orbital, j, axis=1))
ymin = j - cutoff
ymax = j + cutoff
elif j + cutoff > ylen:
WF_ijk_rolled = np.roll(WF_ijk_rolled_x, -cutoff, axis=1)
orb_rolled = []
for orbital in orb_rolled_x:
orb_rolled.append(np.roll(orbital, j - cutoff, axis=1))
ymin = j - cutoff * 2
ymax = j
else:
print(f"ERROR: for i = {i} something with rolling arrays along y goes wrong")
integral = []
for orbital in orb_rolled:
integral.append(np.linalg.norm(WF_ijk_rolled[xmin:xmax, ymin:ymax, zmin:zmax]*\
orbital[xmin:xmax, ymin:ymax, zmin:zmax]))
overlap_integrals_squared[i][j] = max(integral) ** 2
return overlap_integrals_squared
def generate_acceptor_orbitals(self, orb_type, shape, z_shift=0, x_shift=0, y_shift=0):
"""
Generates acceptor (tip) orbitals on the calculation grid using the tip_types.py module
:param orb_type: tip type key understood by tip_types.orbitals
:param shape: grid shape (nx, ny, nz)
:param z_shift, x_shift, y_shift: grid-index offsets of the orbital center
:return: list of 3D np.arrays, one per acceptor orbital
"""
bohr_radius = 0.529 #TODO better to get it from core.constants
b1, b2, b3 = self._get_basis_vectors()
bn1 = b1 / shape[0]
bn2 = b2 / shape[1]
bn3 = b3 / shape[2]
basis = np.array([bn1, bn2, bn3])
transition_matrix = basis.transpose()
numer_of_orbitals = len(tip_types.orbitals(orb_type))
acc_orbitals = []
for i in range(numer_of_orbitals):
acc_orbitals.append(np.zeros(shape))
for i in range(shape[0]):
for j in range(shape[1]):
for k in range(shape[2]):
if i - x_shift >= shape[0] / 2:
x = i - shape[0] - x_shift
else:
x = i - x_shift
if j - y_shift >= shape[1] / 2:
y = j - shape[1] - y_shift
else:
y = j - y_shift
if k - z_shift >= shape[2] / 2:
z = k - shape[2] - z_shift
else:
z = k - z_shift
r = np.dot(transition_matrix, np.array([x, y, z])) / bohr_radius
for o, orbital in enumerate(acc_orbitals):
orbital[i][j][k] = tip_types.orbitals(orb_type)[o](r)
return acc_orbitals
def calculate_2D_ECSTM(self, z_position, tip_type='oxygen', dir='ECSTM', cutoff_in_Angstroms=5, all_z=False, from_CD=False):
"""
This function calculates 2D ECSTM images using the GerischerMarkus module
:param z_position: position of the tip relative to the investigated surface, in Angstroms
(the surface is assumed to lie in the middle of the cell along z)
:param tip_type: type of tip orbital; any entry of AVAILABLE_TIPS_TYPES, or 's' / 'pz'
:param dir: directory to save images
:param cutoff_in_Angstroms: cutoff over x, y and z when the overlap integral is calculated,
e.g. for the x direction the integration area runs from x-cutoff to x+cutoff
:return: nothing
"""
print(f"Starting to calculate 2D ECSTM; tip_type = {tip_type};")
xlen, ylen, zlen, elen = np.shape(self.WF_ijke)
b1, b2, b3 = self._get_basis_vectors()
bn1 = b1 / xlen
bn2 = b2 / ylen
bn3 = b3 / zlen
print (bn1, bn2, xlen, ylen)
if b3[0] != 0.0 or b3[1] != 0.0:
print("WARNING! You z_vector is not perpendicular to XY plane, Check calculate_2D_STM function")
z = int(zlen // 2 + z_position // np.linalg.norm(bn3))
real_z_position = z_position // np.linalg.norm(bn3) * np.linalg.norm(bn3)
print(f"Real z_position of tip = {real_z_position}")
R = []
for j in range(ylen):
for i in range(xlen):
R.append(i * bn1 + j * bn2)
X = np.array([x[0] for x in R])
Y = np.array([x[1] for x in R])
if any(tip_type == kw for kw in self.AVAILABLE_TIPS_TYPES):
cutoff = int(cutoff_in_Angstroms // np.linalg.norm(bn1))
if cutoff > xlen // 2 or cutoff > ylen // 2:
print("ERROR: Cutoff should be less than 1/2 of each dimension of the cell. "
"Try to reduce cutoff. Otherwise, result could be unpredictible")
print(f"Cutoff in int = {cutoff}")
ECSTM_ij = np.zeros((xlen, ylen))
acc_orbitals = self.generate_acceptor_orbitals(tip_type, (xlen, ylen, zlen), z_shift=z)
if all_z == True:
zmin = 0
zmax = zlen
elif z - cutoff >= 0 and z + cutoff <= zlen:
zmin = z - cutoff
zmax = z + cutoff
else:
print("Can't reduce integrating area in z dimention. "
"Will calculate overlapping for all z. You can ignore this message "
"if you don't care about efficiency")
zmin = 0
zmax = zlen
self._calculate_distributions()
for e in range(elen):
if self.y_redox[e] < self.threshold_value or self.y_fermi[e] < self.threshold_value:
continue
overlap_integrals_squared = self._get_overlap_integrals_squared(e, cutoff, acc_orbitals, zmin, zmax)
matrix_elements_squared = overlap_integrals_squared * self.linear_constant * np.linalg.norm(bn3)**3
#print ('Hif = ', matrix_elements_squared, 'eV')
#kappa = self._get_kappa(matrix_elements_squared)
#ECSTM_ij += kappa * self.y_fermi[e] * self.DOS[e] * self.y_redox[e] * self.dE * 2 * math.pi *\
# self.ELEM_CHARGE / self.PLANCK_CONSTANT
ECSTM_ij += 2 * np.pi / self.PLANCK_CONSTANT * matrix_elements_squared * self.y_fermi[e] * self.DOS[e] * self.y_redox[e] * self.dE
elif tip_type == 's':
ECSTM_ij = np.zeros((xlen, ylen))
self._calculate_distributions()
for e in range(elen):
if self.y_redox[e] < self.threshold_value or self.y_fermi[e] < self.threshold_value:
continue
if from_CD == True:
matrix_elements_squared = self.CD_ijke[:, :, z, e]
ECSTM_ij += 2 * np.pi / self.PLANCK_CONSTANT * matrix_elements_squared * self.y_fermi[e] * self.DOS[e] * self.y_redox[e] * self.dE
else:
matrix_elements_squared = np.abs(self.WF_ijke[:, :, z, e]) ** 2
#print ('Hif = ', matrix_elements_squared, 'eV')
#kappa = self._get_kappa(matrix_elements_squared)
#ECSTM_ij += kappa * self.y_fermi[e] * self.DOS[e] * self.y_redox[e] * self.dE * 2 * math.pi *\
# self.ELEM_CHARGE / self.PLANCK_CONSTANT\
#TODO - check formula!!!
ECSTM_ij += 2 * np.pi / self.PLANCK_CONSTANT * matrix_elements_squared * self.y_fermi[e] * self.DOS[e] * self.y_redox[e] * self.dE
elif tip_type == 'pz':
ECSTM_ij = np.zeros((xlen, ylen))
self._calculate_distributions()
for e in range(elen):
if self.y_redox[e] < self.threshold_value or self.y_fermi[e] < self.threshold_value:
continue
grad_z_WF_for_e = np.gradient(self.WF_ijke[:, :, :, e], axis=2)
matrix_elements_squared = (np.abs(grad_z_WF_for_e[:, :, z])) ** 2
print ('Hif = ', matrix_elements_squared, 'eV')
#kappa = self._get_kappa(matrix_elements_squared)
#ECSTM_ij += kappa * self.y_fermi[e] * self.DOS[e] * self.y_redox[e] * self.dE * 2 * math.pi *\
# self.ELEM_CHARGE / self.PLANCK_CONSTANT
ECSTM_ij += 2 * np.pi / self.PLANCK_CONSTANT * matrix_elements_squared * self.y_fermi[e] * self.DOS[e] * self.y_redox[e] * self.dE
if from_CD:
cd_flag='from_CD'
else:
cd_flag=''
filename = f"ecstm_{'%.2f'%real_z_position}_{tip_type}_{cd_flag}_{self.V_std}_{self.overpot}_{self.lambda_}"
self._plot_contour_map(ECSTM_ij, X, Y, dir, filename)
np.save(dir+'/'+filename, ECSTM_ij)
def calculate_2D_STM(self, STM_energy_range, z_position, tip_type='s', dir='STM_2D', cutoff_in_Angstroms=5, all_z=False, from_CD=False):
"""
Function for calculating 2D STM images
:param STM_energy_range: energy range relative to the Fermi level
:param z_position: position of the tip relative to the surface, in Angstroms, assuming
that the investigated XY surface lies in the middle of the cell along the z axis
:param tip_type: type of tip orbital
:param dir: directory for saving images
:param cutoff_in_Angstroms: cutoff over x, y and z when the overlap integral is calculated,
e.g. for the x direction the integration area runs from x-cutoff to x+cutoff
:return: nothing
"""
emin = int((STM_energy_range[0] - self.energy_range[0]) / self.dE)
emax = int((STM_energy_range[1] - self.energy_range[0]) / self.dE)
print(f"Starting to calculate 2D STM; tip_type = {tip_type}; STM energy range = {STM_energy_range}")
e_array = np.arange(emin, emax)
xlen, ylen, zlen, elen = np.shape(self.WF_ijke)
b1, b2, b3 = self._get_basis_vectors()
bn1 = b1 / xlen
bn2 = b2 / ylen
bn3 = b3 / zlen
if b3[0] != 0.0 or b3[1] != 0.0 :
print("WARNING! You z_vector is not perpendicular to XY plane, Check calculate_2D_STM function")
z = int(zlen // 2 + z_position // np.linalg.norm(bn3))
real_z_position = z_position // np.linalg.norm(bn3) * np.linalg.norm(bn3)
print(f"Real z_position of tip = {real_z_position}")
R = []
for j in range(ylen):
for i in range(xlen):
R.append(i * bn1 + j * bn2)
X = np.array([x[0] for x in R])
Y = np.array([x[1] for x in R])
if any(tip_type == kw for kw in self.AVAILABLE_TIPS_TYPES):
cutoff = int(cutoff_in_Angstroms // np.linalg.norm(bn1))
if cutoff > xlen//2 or cutoff > ylen//2:
print("ERROR: Cutoff should be less than 1/2 of each dimension of the cell. "
"Try to reduce cutoff. Otherwise, result could be unpredictible")
print (f"Cutoff in int = {cutoff}")
STM_ij = np.zeros((xlen, ylen))
acc_orbitals = self.generate_acceptor_orbitals(tip_type, (xlen, ylen, zlen), z_shift=z)
if all_z == True:
zmin = 0
zmax = zlen
elif z - cutoff >=0 and z + cutoff <= zlen:
zmin = z-cutoff
zmax = z+cutoff
else:
print("Can't reduce integrating area in z dimention. "
"Will calculate overlapping for all z. You can ignore this message "
"if you don't care about efficiency")
zmin = 0
zmax = zlen
for e in e_array:
overlap_integrals_squared = self._get_overlap_integrals_squared(e, cutoff, acc_orbitals, zmin, zmax)
STM_ij += overlap_integrals_squared
elif tip_type == 's':
if from_CD == True:
STM_ij = np.zeros((xlen, ylen))
for e in e_array:
STM_ij += self.CD_ijke[:, :, z, e]
else:
STM_ij = np.zeros((xlen, ylen))
for e in e_array:
STM_ij += np.abs(self.WF_ijke[:, :, z, e])**2
elif tip_type == 'pz':
STM_ij = np.zeros((xlen, ylen))
for e in e_array:
grad_z_WF_for_e = np.gradient(self.WF_ijke[:, :, :, e], axis=2)
STM_ij += (np.abs(grad_z_WF_for_e[:, :, z])) ** 2
elif tip_type == 'px':
STM_ij = np.zeros((xlen, ylen))
for e in e_array:
grad_x_WF_for_e = np.gradient(self.WF_ijke[:, :, z, e], axis=0)
STM_ij += (np.abs(grad_x_WF_for_e)) ** 2
elif tip_type == 'p30':
STM_ij = np.zeros((xlen, ylen))
for e in e_array:
grad_x_WF_for_e = np.gradient(self.WF_ijke[:, :, z, e], axis=0)
grad_y_WF_for_e = np.gradient(self.WF_ijke[:, :, z, e], axis=1)
STM_ij += (np.abs(grad_y_WF_for_e + grad_x_WF_for_e)) ** 2
elif tip_type == 'p60':
STM_ij = np.zeros((xlen, ylen))
for e in e_array:
grad_y_WF_for_e = np.gradient(self.WF_ijke[:, :, z, e], axis=1)
STM_ij += (np.abs(grad_y_WF_for_e)) ** 2
elif tip_type == 'p90':
STM_ij = np.zeros((xlen, ylen))
for e in e_array:
grad_x_WF_for_e = np.gradient(self.WF_ijke[:, :, z, e], axis=0)
grad_y_WF_for_e = np.gradient(self.WF_ijke[:, :, z, e], axis=1)
STM_ij += (np.abs(grad_y_WF_for_e - grad_x_WF_for_e / 2.0)) ** 2
elif tip_type == 'p120':
STM_ij = np.zeros((xlen, ylen))
for e in e_array:
grad_x_WF_for_e = np.gradient(self.WF_ijke[:, :, z, e], axis=0)
grad_y_WF_for_e = np.gradient(self.WF_ijke[:, :, z, e], axis=1)
STM_ij += (np.abs(grad_y_WF_for_e - grad_x_WF_for_e)) ** 2
elif tip_type == 'p150':
STM_ij = np.zeros((xlen, ylen))
for e in e_array:
grad_x_WF_for_e = np.gradient(self.WF_ijke[:, :, z, e], axis=0)
grad_y_WF_for_e = np.gradient(self.WF_ijke[:, :, z, e], axis=1)
STM_ij += (np.abs(grad_y_WF_for_e / 2.0 - grad_x_WF_for_e)) ** 2
elif tip_type == 's+pz':
STM_ij = np.zeros((xlen, ylen))
for e in e_array:
grad_z_WF_for_e = np.gradient(self.WF_ijke[:, :, :, e], axis=2)
STM_ij += (np.abs(self.WF_ijke[:, :, z, e] + grad_z_WF_for_e[:, :, z])) ** 2
if from_CD:
cd_flag='from_CD'
else:
cd_flag=''
filename = 'stm_' + str('%.2f' % real_z_position) + '_' + tip_type + '_' + cd_flag
self._plot_contour_map(STM_ij, X, Y, dir, filename)
np.save(dir+'/'+filename, STM_ij)
def calculate_3D_STM(self, STM_energy_range, tip_type='s', dir='STM_3D', format='cube', from_CD=False):
emin = int((STM_energy_range[0] - self.energy_range[0]) / self.dE)
emax = int((STM_energy_range[1] - self.energy_range[0]) / self.dE)
print ('Starting to calculate 3D STM; tip_type ='+tip_type+'; STM energy range = ', STM_energy_range)
e_array = np.arange(emin, emax)
xlen, ylen, zlen, elen = np.shape(self.WF_ijke)
if tip_type == 's':
if from_CD == True:
STM_ijk = np.zeros((xlen, ylen, zlen))
for e in e_array:
STM_ijk += self.CD_ijke[:, :, :, e]
else:
STM_ijk = np.zeros((xlen, ylen, zlen))
for e in e_array:
STM_ijk += np.abs(self.WF_ijke[:, :, :, e]) ** 2
elif tip_type == 'pz':
STM_ijk = np.zeros((xlen, ylen, zlen))
for e in e_array:
grad_z_WF_for_e = np.gradient(self.WF_ijke[:, :, :, e], axis=2)
STM_ijk += (np.abs(grad_z_WF_for_e)) ** 2
elif tip_type == 'px':
STM_ijk = np.zeros((xlen, ylen, zlen))
for e in e_array:
grad_x_WF_for_e = np.gradient(self.WF_ijke[:, :, :, e], axis=0)
STM_ijk += (np.abs(grad_x_WF_for_e)) ** 2
elif tip_type == 'p30':
STM_ijk = np.zeros((xlen, ylen, zlen))
for e in e_array:
grad_x_WF_for_e = np.gradient(self.WF_ijke[:, :, :, e], axis=0)
grad_y_WF_for_e = np.gradient(self.WF_ijke[:, :, :, e], axis=1)
STM_ijk += (np.abs(grad_y_WF_for_e + grad_x_WF_for_e)) ** 2
elif tip_type == 'p60':
STM_ijk = np.zeros((xlen, ylen, zlen))
for e in e_array:
grad_y_WF_for_e = np.gradient(self.WF_ijke[:, :, :, e], axis=1)
STM_ijk += (np.abs(grad_y_WF_for_e)) ** 2
elif tip_type == 'p90':
STM_ijk = np.zeros((xlen, ylen, zlen))
for e in e_array:
grad_x_WF_for_e = np.gradient(self.WF_ijke[:, :, :, e], axis=0)
grad_y_WF_for_e = np.gradient(self.WF_ijke[:, :, :, e], axis=1)
STM_ijk += (np.abs(grad_y_WF_for_e - grad_x_WF_for_e / 2.0)) ** 2
elif tip_type == 'p120':
STM_ijk = np.zeros((xlen, ylen, zlen))
for e in e_array:
grad_x_WF_for_e = np.gradient(self.WF_ijke[:, :, :, e], axis=0)
grad_y_WF_for_e = np.gradient(self.WF_ijke[:, :, :, e], axis=1)
STM_ijk += (np.abs(grad_y_WF_for_e - grad_x_WF_for_e)) ** 2
elif tip_type == 'p150':
STM_ijk = np.zeros((xlen, ylen, zlen))
for e in e_array:
grad_x_WF_for_e = np.gradient(self.WF_ijke[:, :, :, e], axis=0)
grad_y_WF_for_e = np.gradient(self.WF_ijke[:, :, :, e], axis=1)
STM_ijk += (np.abs(grad_y_WF_for_e / 2.0 - grad_x_WF_for_e)) ** 2
elif tip_type == 's+pz':
STM_ijk = np.zeros((xlen, ylen, zlen))
for e in e_array:
grad_z_WF_for_e = np.gradient(self.WF_ijke[:, :, :, e], axis=2)
STM_ijk += (np.abs(self.WF_ijke[:, :, :, e] + grad_z_WF_for_e)) ** 2
if not os.path.exists(dir):
print(f"Directory {dir} does not exist. Creating directory")
os.mkdir(dir)
filename = 'stm_'+str(STM_energy_range[0])+'_'+str(STM_energy_range[1])+'_'+tip_type
if format == 'vasp':
self.save_as_vasp(STM_ijk, filename, dir)
elif format == 'cube':
self.save_as_cube(STM_ijk, filename, dir)
if __name__=="__main__":
t = time.time()
stm = STM(working_folder='..')  # STM.__init__ takes a single working_folder; Saved_data and POSCAR are resolved relative to it
stm.load_data()
###CALCULATE 2D STM IMAGES###
#stm.calculate_2D_STM((-0.5, 0.0), 3.5, tip_type='oxygen', cutoff_in_Angstroms=5)
#stm.calculate_2D_STM((-0.5, 0.0), 3.0, tip_type='s')
#stm.calculate_2D_STM((-0.5, 0.0), 3.0, tip_type='pz')
#stm.calculate_2D_STM((-0.5, 0.0), 3.0, tip_type='s+pz')
#stm.calculate_2D_STM((-0.5, 0.0), 4.5, tip_type='IrCl6', cutoff_in_Angstroms=8)
#stm.calculate_2D_STM((-0.5, 0.0), 5.5, tip_type='RuNH3_6_NNN_plane', cutoff_in_Angstroms=8)
###CALCULATE 2D ECSTM IMAGES###
stm.load_data_for_ecstm()  # load_data_for_ecstm takes no arguments; OUTCAR and LOCPOT are located via the working folder
#for V_std in [-1.0, -0.5, 0.0, 0.5, 1.0]:
#for V_std in [-0.5]:
# stm.set_ecstm_parameters(C_EDL=20, T=298, lambda_=0.9, V_std=V_std,
# overpot=0, effective_freq = 1E13, linear_constant=26, threshold_value=1e-5)
# stm.plot_distributions(E_range=[-7, 4], dE=0.05, sigma=1, fill_area_lower_Fermi_lvl=True,
# plot_Fermi_Dirac_distib=False, plot_redox_distrib=True)
# #stm.calculate_2D_ECSTM(z_position=4.5, tip_type='IrCl6', cutoff_in_Angstroms=6)
# #stm.calculate_2D_ECSTM(z_position=3.0, tip_type='s')
# #stm.calculate_2D_ECSTM(z_position=3.5, tip_type='pz')
# stm.calculate_2D_ECSTM(z_position=4.5, tip_type='s')
#stm.set_ecstm_parameters(C_EDL=20, T=298, lambda_=1.63, V_std=-0.6,
# overpot=0, effective_freq = 1E13, linear_constant=26, threshold_value=1e-5)
#stm.plot_distributions(E_range=[-7, 4], dE=0.05, sigma=1, fill_area_lower_Fermi_lvl=True,
# plot_Fermi_Dirac_distib=False, plot_redox_distrib=True)
#stm.calculate_2D_ECSTM(z_position=3.5, tip_type='oxygen', cutoff_in_Angstroms=5)
#stm.set_ecstm_parameters(C_EDL=20, T=298, lambda_=0.8, V_std=0.1,
# overpot=0, effective_freq = 1E13, linear_constant=26, threshold_value=1e-5)
#stm.plot_distributions(E_range=[-7, 4], dE=0.05, sigma=1, fill_area_lower_Fermi_lvl=True,
# plot_Fermi_Dirac_distib=False, plot_redox_distrib=True)
#stm.calculate_2D_ECSTM(z_position=5.5, tip_type='RuNH3_6_NNN_plane', cutoff_in_Angstroms=8)
#stm.calculate_2D_ECSTM(z_position=3.0, tip_type='s')
#stm.calculate_2D_ECSTM(z_position=3.0, tip_type='pz')
#stm.calculate_2D_ECSTM(z_position=5.0, tip_type='s')
#stm.calculate_2D_ECSTM(z_position=5.0, tip_type='pz')
#stm.calculate_2D_ECSTM(z_position=4.5, tip_type='s')
#stm.calculate_2D_ECSTM(z_position=4.5, tip_type='pz')
stm.set_ecstm_parameters(C_EDL=20, T=298, lambda_=1.2, V_std=0.87,
overpot=0, effective_freq = 1E13, linear_constant=26, threshold_value=1e-5)
stm.plot_distributions(E_range=[-7, 4], dE=0.05, sigma=1, fill_area_lower_Fermi_lvl=True,
plot_Fermi_Dirac_distib=False, plot_redox_distrib=True)
stm.calculate_2D_ECSTM(z_position=5.5, tip_type='IrCl6', cutoff_in_Angstroms=6)
#stm.calculate_2D_ECSTM(z_position=3.0, tip_type='s')
#stm.calculate_2D_ECSTM(z_position=3.0, tip_type='pz')
#stm.calculate_2D_ECSTM(z_position=5.0, tip_type='s')
#stm.calculate_2D_ECSTM(z_position=5.0, tip_type='pz')
#stm.calculate_2D_ECSTM(z_position=4.5, tip_type='s')
#stm.calculate_2D_ECSTM(z_position=4.5, tip_type='pz')
#stm.set_ecstm_parameters(C_EDL=20, T=298, lambda_=0.9, V_std=0.0,
# overpot=0, effective_freq = 1E13, linear_constant=26, threshold_value=1e-5)
#stm.plot_distributions(E_range=[-7, 4], dE=0.05, sigma=1, fill_area_lower_Fermi_lvl=True,
# plot_Fermi_Dirac_distib=False, plot_redox_distrib=True)
#stm.calculate_2D_ECSTM(z_position=3.0, tip_type='oxygen', cutoff_in_Angstroms=5)
#stm.set_ecstm_parameters(C_EDL=20, T=298, lambda_= 1.0, V_std=-0.5,
# overpot=0, effective_freq = 1E13, linear_constant=26, threshold_value=1e-5)
#stm.plot_distributions(E_range=[-7, 4], dE=0.05, sigma=1, fill_area_lower_Fermi_lvl=True,
# plot_Fermi_Dirac_distib=False, plot_redox_distrib=True)
#stm.calculate_2D_ECSTM(z_position=3.0, tip_type='oxygen', cutoff_in_Angstroms=5)
#stm.calculate_2D_ECSTM(z_position=5, tip_type='RuNH3_6_NNN_plane', cutoff_in_Angstroms=8, all_z=True)
#stm.calculate_2D_ECSTM(z_position=7, tip_type='RuNH3_6_NNN_plane', cutoff_in_Angstroms=8, all_z=True)
#stm.calculate_2D_ECSTM(z_position=4.5, tip_type='oxygen', cutoff_in_Angstroms=5)
#stm.calculate_2D_ECSTM(z_position=3.5, tip_type='oxygen', cutoff_in_Angstroms=5)
###CALCULATE 3D STM IMAGE (OPENABLE WITH THE VESTA PACKAGE)###
#stm.calculate_3D_STM((-0.5, 0.0), tip_type='s', format='cube')
#stm.calculate_3D_STM((0.0, 0.5), tip_type='s', format='cube')
###CHECK OXYGEN ORBITAL GENERATION###
#acc_orbitals = stm.generate_acceptor_orbitals('RuNH3_6_NNN_plane', np.shape(stm.WF_ijke)[0:3], z_shift = 20, x_shift=50, y_shift=50)
#for i, orbitals in enumerate(acc_orbitals):
# stm.save_as_cube(orbitals, 'RuNH3_NNN'+str(i), 'orbitals')
#acc_orbitals = stm.generate_acceptor_orbitals('RuNH3_6', np.shape(stm.WF_ijke)[0:3], z_shift = 20, x_shift=50, y_shift=50)
#for i, orbitals in enumerate(acc_orbitals):
# stm.save_as_cube(orbitals, 'RuNH3_standart_orient'+str(i), 'orbitals')
#acc_orbitals = stm.generate_acceptor_orbitals('RuNH3_6_perpendicular', np.shape(stm.WF_ijke)[0:3], z_shift = 20, x_shift=50, y_shift=50)
#for i, orbitals in enumerate(acc_orbitals):
# stm.save_as_cube(orbitals, 'RuNH3_perpendicular'+str(i), 'orbitals')
#acc_orbitals = stm.generate_acceptor_orbitals('oxygen_parallel_y', np.shape(stm.WF_ijke)[0:3], z_shift = 20, x_shift=50, y_shift=50)
#for i, orbitals in enumerate(acc_orbitals):
# stm.save_as_vasp(orbitals, 'ox_par_y_orb_'+str(i), 'orbitals')
#print(f"Job done! It takes: {time.time()-t} sec")


@@ -0,0 +1,398 @@
import numpy as np
import math
def read_orbital_data(filename):
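"""
Parses a text file of orbital data containing 'GTF:' lines (the fixed column slices below define
the assumed format; Fortran-style 'D' exponents are converted to 'E'). Returns a list of
[center_index, primitive_type, coefficient, exponent] entries, with center_index starting from 0.
"""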
data=[]
with open(filename) as inp:
lines = inp.readlines()
for line in lines:
arr = line.strip().split()
try:
if arr[0] == 'GTF:':
a = list(line[44:61])
for n, i in enumerate(a):
if i=='D':
a[n]='E'
a = float(''.join(a))
if a!=0:
b = list(line[65:79])
for n, i in enumerate(b):
if i == 'D':
b[n] = 'E'
b = float(''.join(b))
type = line[32:36].strip()
type = type.replace(" ", "")
cen = line[18:21]
data.append([int(cen)-1, type, a, b])
except:
pass
return data
def get_mol_orbital(data, centers):
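"""
Returns a closure that evaluates a molecular orbital as a sum of Cartesian Gaussian primitives.
Each entry of `data` is [center_index, type, weight, exponent]; `centers` holds the primitive
centers; the returned function expects a position r in the same units as `centers`
(apparently Bohr, cf. the division by bohr_radius in stm.py's generate_acceptor_orbitals).
"""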
def inner_func(r):
r = np.array(r)
result = 0
for dat in data:
c_num, type, weight, exp = dat
c = centers[c_num]
R2center = (np.linalg.norm(r - c)) ** 2
if type == 'S':
result += weight * math.exp(-exp * R2center)
elif type == 'X':
result += weight * r[0] * math.exp(-exp * R2center)
elif type == 'Y':
result += weight * r[1] * math.exp(-exp * R2center)
elif type == 'Z':
result += weight * r[2] * math.exp(-exp * R2center)
elif type == 'XX':
result += weight * r[0] * r[0] * math.exp(-exp * R2center)
elif type == 'YY':
result += weight * r[1] * r[1] * math.exp(-exp * R2center)
elif type == 'ZZ':
result += weight * r[2] * r[2] * math.exp(-exp * R2center)
elif type == 'XY':
result += weight * r[0] * r[1] * math.exp(-exp * R2center)
elif type == 'XZ':
result += weight * r[0] * r[2] * math.exp(-exp * R2center)
elif type == 'YZ':
result += weight * r[1] * r[2] * math.exp(-exp * R2center)
else:
print('Error! This type is not described! type = ', type)
return result
return inner_func
def orbitals(tip_type):
if tip_type == 'oxygen':
O2_centers = [[0, 0, -1.188061], [0, 0, 1.188061]]
O2_26_data = [[0, 'X', 1.59780706, 15.539616], [0, 'X', 1.23102421, 3.5999336],\
[0, 'X', 0.540485934, 1.0137618], [0, 'X', 0.116494955, 0.27000582],\
[1, 'X', -1.59780706, 15.539616], [1, 'X', -1.23102421, 3.5999336],\
[1, 'X', -0.540485934, 1.0137618], [1, 'X', -0.116494955, 0.27000582]]
O2_27_data = [[0, 'Y', 1.59780706, 15.539616], [0, 'Y', 1.23102421, 3.5999336],\
[0, 'Y', 0.540485934, 1.0137618], [0, 'Y', 0.116494955, 0.27000582],\
[1, 'Y', -1.59780706, 15.539616], [1, 'Y', -1.23102421, 3.5999336],\
[1, 'Y', -0.540485934, 1.0137618], [1, 'Y', -0.116494955, 0.27000582]]
O2_POMO_26 = get_mol_orbital(O2_26_data, O2_centers)
O2_POMO_27 = get_mol_orbital(O2_27_data, O2_centers)
return [O2_POMO_26, O2_POMO_27]
elif tip_type == 'IrCl6':
IrCl6_centers = [[0.0, 0.0, 0.0], [0.0, 0.0, 4.791539], [0.0, 4.791539, 0.0], [-4.791539, 0.0, 0.0],\
[0.0, 0.0, -4.791539], [0.0, -4.791539, 0.0], [4.791539, 0.0, 0.0]]
IrCl6_29_data = [[0, 'YZ', 1.53018648, 1.24], [0, 'YZ', 0.316517948, 0.4647], [0, 'YZ', 0.0244957551, 0.1529],\
[1, 'Y', 0.143821066, 6.296], [1, 'Y', -0.129983297, 0.6333], [1, 'Y', -0.0361555127, 0.1819],\
[2, 'Z', 0.143821066, 6.296], [2, 'Z', -0.129983297, 0.6333], [2, 'Z', -0.0361555127, 0.1819],\
[4, 'Y', -0.143821066, 6.296], [4, 'Y', 0.129983297, 0.6333], [4, 'Y', 0.0361555127, 0.1819],\
[5, 'Z', -0.143821066, 6.296], [5, 'Z', 0.129983297, 0.6333], [5, 'Z', 0.0361555127, 0.1819]]
IrCl6_30_data = [[0, 'XZ', 1.53018648, 1.24], [0, 'XZ', 0.316517948, 0.4647], [0, 'XZ', 0.0244957551, 0.1529],\
[1, 'X', 0.143821066, 6.296], [1, 'X', -0.129983297, 0.6333], [1, 'X', -0.0361555127, 0.1819],\
[3, 'Z', -0.143821066, 6.296], [3, 'Z', 0.129983297, 0.6333], [3, 'Z', 0.0361555127, 0.1819],\
[4, 'X', -0.143821066, 6.296], [4, 'X', 0.129983297, 0.6333], [4, 'X', 0.0361555127, 0.1819],\
[6, 'Z', 0.143821066, 6.296], [6, 'Z', -0.129983297, 0.6333], [6, 'Z', -0.0361555127, 0.1819]]
IrCl6_31_data = [[0, 'XY', 1.53018648, 1.24], [0, 'XY', 0.316517948, 0.4647], [0, 'XY', 0.0244957551, 0.1529],\
[2, 'X', 0.143821066, 6.296], [2, 'X', -0.129983297, 0.6333], [2, 'X', -0.0361555127, 0.1819],\
[3, 'Y', -0.143821066, 6.296], [3, 'Y', 0.129983297, 0.6333], [3, 'Y', 0.0361555127, 0.1819],\
[5, 'X', -0.143821066, 6.296], [5, 'X', 0.129983297, 0.6333], [5, 'X', 0.0361555127, 0.1819],\
[6, 'Y', 0.143821066, 6.296], [6, 'Y', -0.129983297, 0.6333], [6, 'Y', -0.0361555127, 0.1819]]
IrCl6_29 = get_mol_orbital(IrCl6_29_data, IrCl6_centers)
IrCl6_30 = get_mol_orbital(IrCl6_30_data, IrCl6_centers)
IrCl6_31 = get_mol_orbital(IrCl6_31_data, IrCl6_centers)
return [IrCl6_29, IrCl6_30, IrCl6_31]
elif tip_type == 'RuNH3_6':
RuNH3_6_centers = [[-1.73717655E-03, 2.23398325E-03, 5.95596999E-04], [2.80310681E+00, 1.05571952E+00, -2.92279419E+00],\
[3.11970961E+00, -3.93834154E-01, -4.17275494E+00], [2.20095082E+00, 2.57871173E+00, -3.96265199E+00],\
[2.86486865E+00, 6.10478919E-01, 2.99160626E+00], [2.39084949E+00, -2.98868166E-01, 4.63779676E+00],\
[4.60991448E+00, -5.89373268E-02, 2.47213750E+00], [-1.16539862E+00, 4.01564374E+00, 3.25100126E-01],\
[-1.81856108E+00, 4.42909277E+00, 2.10458406E+00], [3.14711646E-01, 5.21963568E+00, -3.05318533E-02],\
[1.21468221E+00, -3.99589459E+00, -3.05565040E-01], [1.28176071E+00, -4.84983673E+00, 1.43473405E+00],\
[2.84949113E-02, -5.05488013E+00, -1.41702627E+00], [-2.87292734E+00, -6.18033007E-01, -2.98146293E+00],\
[-3.64380554E+00, -2.39428507E+00, -2.87082722E+00], [-4.34685477E+00, 6.34642890E-01, -2.85350843E+00],\
[-2.83649579E+00, -1.07571542E+00, 2.88962265E+00], [-2.30623961E+00, -5.65363155E-01, 4.68553427E+00],\
[-4.54655856E+00, -2.25539186E-01, 2.54576816E+00], [-3.15571140E+00, -2.98787230E+00, 2.91511808E+00],\
[-2.12017233E+00, -4.36184326E-01, -4.75944808E+00], [2.98933039E+00, -4.14797331E+00, -1.07307692E+00],\
[4.52142190E+00, 1.53602082E+00, -2.16060783E+00], [3.08027187E+00, 2.48844379E+00, 3.42328804E+00],\
[-2.57792824E+00, 4.48333677E+00, -9.20281760E-01]]
RuNH3_6_35_data = [[0, 'S', -5.90938527e-05, 1.508], [0, 'S', -6.69986041e-05, 0.5129], [0, 'S', 7.19232404e-05, 0.1362],\
[0, 'S', 0.000667825176, 2.565], [0, 'S', -0.00057234006, 1.508], [0, 'S', -0.000107459153, 0.5129],\
[0, 'S', 3.48252636e-05, 0.0417], [0, 'X', -0.000852926268, 4.859], [0, 'X', 0.00119048253, 1.219],\
[0, 'X', 0.000164935069, 0.4413], [0, 'Y', -6.78621664e-05, 4.859], [0, 'Y', 9.47194692e-05, 1.219],\
[0, 'Y', 1.31228824e-05, 0.4413], [0, 'Z', 0.000373774035, 4.859], [0, 'Z', -0.000521699794, 1.219],\
[0, 'Z', -7.22787521e-05, 0.4413], [0, 'X', -0.000230489569, 0.5725], [0, 'X', 0.000240730695, 0.083],\
[0, 'Y', 0.000424795214, 0.5725], [0, 'Y', -0.000443669739, 0.083], [0, 'Z', -0.000188882268, 0.5725],\
[0, 'Z', 0.000197274696, 0.083], [0, 'X', -1.04499827e-05, 0.025], [0, 'Y', 2.0331508e-05, 0.025],\
[0, 'Z', -8.67202565e-06, 0.025], [0, 'XX', -0.295461307, 4.195], [0, 'XX', -0.442053419, 1.377],\
[0, 'XX', -0.0792638374, 0.4828], [0, 'YY', -0.0672194557, 4.195], [0, 'YY', -0.100570158, 1.377],\
[0, 'YY', -0.0180330618, 0.4828], [0, 'ZZ', 0.362680763, 4.195], [0, 'ZZ', 0.542623577, 1.377],\
[0, 'ZZ', 0.0972968992, 0.4828], [0, 'XY', 0.0475038166, 4.195], [0, 'XY', 0.071072672, 1.377],\
[0, 'XY', 0.0127439184, 0.4828], [0, 'XZ', -0.360816255, 4.195], [0, 'XZ', -0.539834, 1.377],\
[0, 'XZ', -0.0967967049, 0.4828], [0, 'YZ', 1.21237734, 4.195], [0, 'YZ', 1.81389419, 1.377],\
[0, 'YZ', 0.325246244, 0.4828], [0, 'XX', -0.00640014621, 0.1501], [0, 'YY', -0.0015460197, 0.1501],\
[0, 'ZZ', 0.00794616591, 0.1501], [0, 'XY', 0.000762713575, 0.1501], [0, 'XZ', -0.0078740316, 0.1501],\
[0, 'YZ', 0.0264934952, 0.1501], [1, 'S', 8.84456152e-05, 5909.0], [1, 'S', 0.000163160035, 887.5],\
[1, 'S', 0.000263459435, 204.7], [1, 'S', 0.000357001585, 59.84], [1, 'S', 0.000372065476, 20.0],\
[1, 'S', 0.000216286771, 7.193], [1, 'S', 3.36894315e-05, 2.686], [1, 'S', 0.000104934209, 7.193],\
[1, 'S', -0.000120618546, 0.7], [1, 'S', -0.000271470039, 0.2133], [1, 'X', 0.00260803484, 26.79],\
[1, 'X', 0.0025401732, 5.956], [1, 'X', 0.00178461332, 1.707], [1, 'X', 0.00067760306, 0.5314],\
[1, 'Y', 0.0525694372, 26.79], [1, 'Y', 0.0512015687, 5.956], [1, 'Y', 0.0359719573, 1.707],\
[1, 'Y', 0.0136582575, 0.5314], [1, 'Z', 0.0190111972, 26.79], [1, 'Z', 0.0185165217, 5.956],\
[1, 'Z', 0.013008889, 1.707], [1, 'Z', 0.0049393686, 0.5314], [1, 'X', -0.000755895033, 0.1654],\
[1, 'Y', 0.00215785904, 0.1654], [1, 'Z', -4.18680836e-06, 0.1654], [2, 'S', -0.00652499257, 19.2406],\
[2, 'S', -0.0111143259, 2.8992], [2, 'S', -0.0128500417, 0.6534], [2, 'S', -0.00736568257, 0.1776],\
[3, 'S', 0.00283887518, 19.2406], [3, 'S', 0.00483558925, 2.8992], [3, 'S', 0.00559075956, 0.6534],\
[3, 'S', 0.0019287417, 0.1776], [4, 'S', -4.25579402e-05, 5909.0], [4, 'S', -7.85087534e-05, 887.5],\
[4, 'S', -0.000126770455, 204.7], [4, 'S', -0.000171780727, 59.84], [4, 'S', -0.000179029116, 20.0],\
[4, 'S', -0.000104072084, 7.193], [4, 'S', -1.62105584e-05, 2.686], [4, 'S', -2.38121517e-05, 7.193],\
[4, 'S', 2.73713134e-05, 0.7], [4, 'S', 0.000279429803, 0.2133], [4, 'X', 0.0263895084, 26.79],\
[4, 'X', 0.0257028475, 5.956], [4, 'X', 0.0180576837, 1.707], [4, 'X', 0.00685635457, 0.5314],\
[4, 'Y', -0.0427381048, 26.79], [4, 'Y', -0.0416260498, 5.956], [4, 'Y', -0.0292446213, 1.707],\
[4, 'Y', -0.0111039431, 0.5314], [4, 'Z', -0.0163681686, 26.79], [4, 'Z', -0.0159422652, 5.956],\
[4, 'Z', -0.0112003303, 1.707], [4, 'Z', -0.00425267367, 0.5314], [4, 'X', -0.000588236873, 0.1654],\
[4, 'Y', 0.000383927762, 0.1654], [4, 'Z', 0.000887710294, 0.1654], [5, 'S', -0.00112688506, 19.2406],\
[5, 'S', -0.00191947617, 2.8992], [5, 'S', -0.00221923931, 0.6534], [5, 'S', -0.00251590545, 0.1776],\
[6, 'S', 0.00787639039, 19.2406], [6, 'S', 0.0134162252, 2.8992], [6, 'S', 0.0155114269, 0.6534],\
[6, 'S', 0.0117906265, 0.1776], [7, 'S', 0.000290643176, 5909.0], [7, 'S', 0.000536163953, 887.5],\
[7, 'S', 0.000865760125, 204.7], [7, 'S', 0.00117315114, 59.84], [7, 'S', 0.00122265294, 20.0],\
[7, 'S', 0.000710744947, 7.193], [7, 'S', 0.000110707618, 2.686], [7, 'S', 0.000580000402, 7.193],\
[7, 'S', -0.000666692074, 0.7], [7, 'S', -0.000791507988, 0.2133], [7, 'X', -0.00114263758, 26.79],\
[7, 'X', -0.00111290589, 5.956], [7, 'X', -0.000781878451, 1.707], [7, 'X', -0.000296872842, 0.5314],\
[7, 'Y', 0.00554095051, 26.79], [7, 'Y', 0.00539677374, 5.956], [7, 'Y', 0.00379153451, 1.707],\
[7, 'Y', 0.00143961459, 0.5314], [7, 'Z', -0.0664334707, 26.79], [7, 'Z', -0.064704857, 5.956],\
[7, 'Z', -0.0454587704, 1.707], [7, 'Z', -0.0172603227, 0.5314], [7, 'X', 0.000922022693, 0.1654],\
[7, 'Y', -0.000104102274, 0.1654], [7, 'Z', -0.000908609724, 0.1654], [8, 'S', -0.00838784083, 19.2406],\
[8, 'S', -0.0142874027, 2.8992], [8, 'S', -0.0165186555, 0.6534], [8, 'S', -0.0103740236, 0.1776],\
[9, 'S', 0.00116448404, 19.2406], [9, 'S', 0.00198352029, 2.8992],[9, 'S', 0.00229328515, 0.6534],\
[9, 'S', 0.000746855298, 0.1776], [10, 'S', -0.000159483962, 5909.0], [10, 'S', -0.000294208013, 887.5],\
[10, 'S', -0.000475066562, 204.7], [10, 'S', -0.000643740529, 59.84], [10, 'S', -0.000670903538, 20.0],\
[10, 'S', -0.00039000544, 7.193], [10, 'S', -6.07483366e-05, 2.686], [10, 'S', -0.000287196538, 7.193],\
[10, 'S', 0.000330123316, 0.7], [10, 'S', 0.00050688431, 0.2133], [10, 'X', 0.0112645738, 26.79],\
[10, 'X', 0.0109714671, 5.956], [10, 'X', 0.00770806744, 1.707], [10, 'X', 0.00292669006, 0.5314],\
[10, 'Y', -0.000944376614, 26.79], [10, 'Y', -0.000919803725, 5.956], [10, 'Y', -0.00064621341, 1.707],\
[10, 'Y', -0.000245361938, 0.5314], [10, 'Z', 0.0596286316, 26.79], [10, 'Z', 0.0580770813, 5.956],\
[10, 'Z', 0.0408023883, 1.707], [10, 'Z', 0.0154923326, 0.5314], [10, 'X', 0.000209517748, 0.1654],\
[10, 'Y', -0.000360297199, 0.1654], [10, 'Z', -0.000498649525, 0.1654], [11, 'S', 0.00915046467, 19.2406],\
[11, 'S', 0.0155864157, 2.8992], [11, 'S', 0.0180205344, 0.6534], [11, 'S', 0.0129436385, 0.1776],\
[12, 'S', -0.00660194617, 19.2406], [12, 'S', -0.0112454046, 2.8992], [12, 'S', -0.0130015909, 0.6534],\
[12, 'S', -0.00966746285, 0.1776], [13, 'S', 7.83519574e-05, 5909.0], [13, 'S', 0.000144539761, 887.5],\
[13, 'S', 0.000233392717, 204.7], [13, 'S', 0.000316259579, 59.84], [13, 'S', 0.000329604337, 20.0],\
[13, 'S', 0.000191603528, 7.193], [13, 'S', 2.98447006e-05, 2.686], [13, 'S', 0.000246799817, 7.193],\
[13, 'S', -0.000283688566, 0.7], [13, 'S', -6.68862239e-05, 0.2133], [13, 'X', -0.0305463365, 26.79],\
[13, 'X', -0.0297515141, 5.956], [13, 'X', -0.0209020977, 1.707], [13, 'X', -0.00793635528, 0.5314],\
[13, 'Y', 0.0315627487, 26.79], [13, 'Y', 0.0307414789, 5.956], [13, 'Y', 0.0215976033, 1.707],\
[13, 'Y', 0.00820043303, 0.5314], [13, 'Z', 0.0231505973, 26.79], [13, 'Z', 0.0225482136, 5.956],\
[13, 'Z', 0.0158413775, 1.707], [13, 'Z', 0.00601484126, 0.5314], [13, 'X', 0.000237693867, 0.1654],\
[13, 'Y', -0.00123192073, 0.1654], [13, 'Z', 0.000288092875, 0.1654], [14, 'S', -0.00402881913, 19.2406],\
[14, 'S', -0.00686247659, 2.8992], [14, 'S', -0.00793418435, 0.6534], [14, 'S', -0.00693878318, 0.1776],\
[15, 'S', 0.00845874161, 19.2406], [15, 'S', 0.0144081713, 2.8992], [15, 'S', 0.0166582845, 0.6534],\
[15, 'S', 0.0131334049, 0.1776], [16, 'S', -0.000169669121, 5909.0], [16, 'S', -0.000312997083, 887.5],\
[16, 'S', -0.000505405842, 204.7], [16, 'S', -0.000684851871, 59.84], [16, 'S', -0.000713749597, 20.0],\
[16, 'S', -0.000414912443, 7.193], [16, 'S', -6.46279209e-05, 2.686], [16, 'S', -0.000409625509, 7.193],\
[16, 'S', 0.000470851536, 0.7], [16, 'S', 0.000158871955, 0.2133], [16, 'X', -0.0106805032, 26.79],\
[16, 'X', -0.0104025942, 5.956], [16, 'X', -0.00730840251, 1.707], [16, 'X', -0.00277494056, 0.5314],\
[16, 'Y', -0.0345284702, 26.79], [16, 'Y', -0.0336300317, 5.956], [16, 'Y', -0.0236269727, 1.707],\
[16, 'Y', -0.00897096797, 0.5314], [16, 'Z', -0.0277495771, 26.79], [16, 'Z', -0.0270275269, 5.956],\
[16, 'Z', -0.0189883449, 1.707], [16, 'Z', -0.007209719, 0.5314], [16, 'X', -0.000866107884, 0.1654],\
[16, 'Y', 0.000791526817, 0.1654], [16, 'Z', -0.000995436732, 0.1654], [17, 'S', -0.00496446317, 19.2406],\
[17, 'S', -0.00845620296, 2.8992], [17, 'S', -0.00977680176, 0.6534], [17, 'S', -0.00611518732, 0.1776],\
[18, 'S', -0.00155862643, 19.2406], [18, 'S', -0.0026548815, 2.8992], [18, 'S', -0.00306949233, 0.6534],\
[18, 'S', -0.00339973617, 0.1776], [19, 'S', 0.00651256261, 19.2406], [19, 'S', 0.0110931534, 2.8992],\
[19, 'S', 0.0128255627, 0.6534], [19, 'S', 0.00960542363, 0.1776], [20, 'S', -0.00442680693, 19.2406],\
[20, 'S', -0.00754038788, 2.8992], [20, 'S', -0.00871796453, 0.6534], [20, 'S', -0.00582235994, 0.1776],\
[21, 'S', -0.00271809997, 19.2406], [21, 'S', -0.00462986716, 2.8992], [21, 'S', -0.00535291, 0.6534],\
[21, 'S', -0.00443674741, 0.1776], [22, 'S', 0.00382464248, 19.2406], [22, 'S', 0.00651469293, 2.8992],\
[22, 'S', 0.00753208757, 0.6534], [22, 'S', 0.00564203493, 0.1776], [23, 'S', -0.00685678285, 19.2406],\
[23, 'S', -0.0116794798, 2.8992], [23, 'S', -0.0135034553, 0.6534], [23, 'S', -0.0100430099, 0.1776],\
[24, 'S', 0.00744861736, 19.2406], [24, 'S', 0.0126875793, 2.8992], [24, 'S', 0.0146689889, 0.6534],\
[24, 'S', 0.0111030771, 0.1776]]
RuNH3_6_36_data = [[0, 'S', 5.56219117e-05, 1.508], [0, 'S', 6.30622352e-05, 0.5129], [0, 'S', -6.76975344e-05, 0.1362],\
[0, 'S', -0.000531100526, 2.565], [0, 'S', 0.000455164192, 1.508], [0, 'S', 8.54589118e-05, 0.5129],\
[0, 'S', -4.21526227e-05, 0.0417], [0, 'X', 0.00055829555, 4.859], [0, 'X', -0.00077924801, 1.219],\
[0, 'X', -0.000107960698, 0.4413], [0, 'Y', -0.000233384918, 4.859], [0, 'Y', 0.000325749924, 1.219],\
[0, 'Y', 4.51309322e-05, 0.4413], [0, 'Z', -0.000209188811, 4.859], [0, 'Z', 0.000291977904, 1.219],\
[0, 'Z', 4.04519971e-05, 0.4413], [0, 'X', -0.000662019566, 0.5725], [0, 'X', 0.000691434456, 0.083],\
[0, 'Y', 0.00029307311, 0.5725], [0, 'Y', -0.00030609495, 0.083], [0, 'Z', 0.000292702989, 0.5725],\
[0, 'Z', -0.000305708385, 0.083], [0, 'X', -2.76526194e-05, 0.025], [0, 'Y', 1.48379268e-05, 0.025],\
[0, 'Z', 2.0610484e-05, 0.025], [0, 'XX', 0.31450656, 4.195], [0, 'XX', 0.470547909, 1.377],\
[0, 'XX', 0.0843731353, 0.4828], [0, 'YY', 0.229594293, 4.195], [0, 'YY', 0.343506713, 1.377],\
[0, 'YY', 0.0615935972, 0.4828], [0, 'ZZ', -0.544100853, 4.195], [0, 'ZZ', -0.814054622, 1.377],\
[0, 'ZZ', -0.145966732, 0.4828], [0, 'XY', 0.97411606, 4.195], [0, 'XY', 1.4574204, 1.377],\
[0, 'XY', 0.261327542, 0.4828], [0, 'XZ', -0.0405782461, 4.195], [0, 'XZ', -0.0607110035, 1.377],\
[0, 'XZ', -0.0108859855, 0.4828], [0, 'YZ', 0.454156155, 4.195], [0, 'YZ', 0.679484171, 1.377],\
[0, 'YZ', 0.121837137, 0.4828], [0, 'XX', 0.00679643964, 0.1501], [0, 'YY', 0.00510300971, 0.1501],\
[0, 'ZZ', -0.0118994494, 0.1501], [0, 'XY', 0.0213597971, 0.1501], [0, 'XZ', -0.000875545312, 0.1501],\
[0, 'YZ', 0.0101456122, 0.1501], [1, 'S', -1.09192934e-05, 5909.0], [1, 'S', -2.01433647e-05, 887.5],\
[1, 'S', -3.25260993e-05, 204.7], [1, 'S', -4.40745993e-05, 59.84], [1, 'S', -4.59343528e-05, 20.0],\
[1, 'S', -2.67022701e-05, 7.193], [1, 'S', -4.15922017e-06, 2.686], [1, 'S', -5.50655737e-05, 7.193],\
[1, 'S', 6.32961312e-05, 0.7], [1, 'S', -9.03633726e-05, 0.2133], [1, 'X', -0.0384295808, 26.79],\
[1, 'X', -0.0374296346, 5.956], [1, 'X', -0.0262964056, 1.707], [1, 'X', -0.00998452978, 0.5314],\
[1, 'Y', -0.0131017406, 26.79], [1, 'Y', -0.0127608304, 5.956], [1, 'Y', -0.00896519497, 1.707],\
[1, 'Y', -0.00340401109, 0.5314], [1, 'Z', -0.0385472955, 26.79], [1, 'Z', -0.0375442863, 5.956],\
[1, 'Z', -0.0263769548, 1.707], [1, 'Z', -0.0100151137, 0.5314], [1, 'X', -0.000651252471, 0.1654],\
[1, 'Y', 0.00153747643, 0.1654], [1, 'Z', -0.000247277414, 0.1654], [2, 'S', 0.00582315223, 19.2406],\
[2, 'S', 0.00991884832, 2.8992], [2, 'S', 0.0114678673, 0.6534], [2, 'S', 0.00948207442, 0.1776],\
[3, 'S', 0.00214294491, 19.2406], [3, 'S', 0.00365017859, 2.8992], [3, 'S', 0.0042202242, 0.6534],\
[3, 'S', 0.00147162814, 0.1776], [4, 'S', -6.82029859e-05, 5909.0], [4, 'S', -0.000125817447, 887.5],\
[4, 'S', -0.000203161231, 204.7], [4, 'S', -0.000275294304, 59.84], [4, 'S', -0.000286910508, 20.0],\
[4, 'S', -0.000166785019, 7.193], [4, 'S', -2.59789003e-05, 2.686], [4, 'S', -0.000209550435, 7.193],\
[4, 'S', 0.000240871582, 0.7], [4, 'S', 3.90585007e-05, 0.2133], [4, 'X', -0.0222719157, 26.79],\
[4, 'X', -0.0216923955, 5.956], [4, 'X', -0.0152401175, 1.707], [4, 'X', -0.00578654779, 0.5314],\
[4, 'Y', -0.0526912426, 26.79], [4, 'Y', -0.0513202047, 5.956], [4, 'Y', -0.0360553057, 1.707],\
[4, 'Y', -0.0136899042, 0.5314], [4, 'Z', 0.0304810868, 26.79], [4, 'Z', 0.0296879621, 5.956],\
[4, 'Z', 0.0208574489, 1.707], [4, 'Z', 0.0079194025, 0.5314], [4, 'X', 0.000167167683, 0.1654],\
[4, 'Y', -0.000251508023, 0.1654], [4, 'Z', -1.83135036e-05, 0.1654], [5, 'S', 0.00944561796, 19.2406],\
[5, 'S', 0.0160891641, 2.8992], [5, 'S', 0.0186017966, 0.6534], [5, 'S', 0.0134597372, 0.1776],\
[6, 'S', -0.00237882245, 19.2406], [6, 'S', -0.00405195985, 2.8992], [6, 'S', -0.00468475134, 0.6534],\
[6, 'S', -0.00406723053, 0.1776], [7, 'S', 1.23113753e-05, 5909.0], [7, 'S', 2.27114077e-05, 887.5],\
[7, 'S', 3.66727958e-05, 204.7], [7, 'S', 4.96935943e-05, 59.84], [7, 'S', 5.17904446e-05, 20.0],\
[7, 'S', 3.01064967e-05, 7.193], [7, 'S', 4.68947201e-06, 2.686], [7, 'S', 3.27892042e-06, 7.193],\
[7, 'S', -3.76901507e-06, 0.7], [7, 'S', -3.21084903e-06, 0.2133], [7, 'X', -0.0453716649, 26.79],\
[7, 'X', -0.0441910841, 5.956], [7, 'X', -0.0310467009, 1.707], [7, 'X', -0.011788178, 0.5314],\
[7, 'Y', -0.0105021073, 26.79], [7, 'Y', -0.0102288401, 5.956], [7, 'Y', -0.00718633057, 1.707],\
[7, 'Y', -0.00272859085, 0.5314], [7, 'Z', -0.00940613787, 26.79], [7, 'Z', -0.00916138807, 5.956],\
[7, 'Z', -0.00643638601, 1.707], [7, 'Z', -0.00244384304, 0.5314], [7, 'X', -0.00186778818, 0.1654],\
[7, 'Y', -0.000712057951, 0.1654], [7, 'Z', 0.00113608073, 0.1654], [8, 'S', -0.00100831405, 19.2406],\
[8, 'S', -0.00171750862, 2.8992], [8, 'S', -0.00198573063, 0.6534], [8, 'S', -0.00299073334, 0.1776],\
[9, 'S', -0.0044447342, 19.2406], [9, 'S', -0.00757092424, 2.8992], [9, 'S', -0.00875326973, 0.6534],\
[9, 'S', -0.00423832627, 0.1776], [10, 'S', -0.000457245237, 5909.0], [10, 'S', -0.000843503078, 887.5],\
[10, 'S', -0.00136202989, 204.7], [10, 'S', -0.00184562314, 59.84], [10, 'S', -0.00192350029, 20.0],\
[10, 'S', -0.00111815714, 7.193], [10, 'S', -0.000174167279, 2.686], [10, 'S', -0.00104676381, 7.193],\
[10, 'S', 0.00120322182, 0.7], [10, 'S', 0.000895658494, 0.2133], [10, 'X', 0.0293098929, 26.79],\
[10, 'X', 0.028547243, 5.956], [10, 'X', 0.0200560302, 1.707], [10, 'X', 0.00761511035, 0.5314],\
[10, 'Y', 0.00944318459, 26.79], [10, 'Y', 0.00919747083, 5.956], [10, 'Y', 0.00646173616, 1.707],\
[10, 'Y', 0.00245346828, 0.5314], [10, 'Z', 0.0281989298, 26.79], [10, 'Z', 0.0274651874, 5.956],\
[10, 'Z', 0.0192958257, 1.707], [10, 'Z', 0.00732646696, 0.5314], [10, 'X', -0.00057701544, 0.1654],\
[10, 'Y', -0.00063761305, 0.1654], [10, 'Z', 0.00163521154, 0.1654], [11, 'S', 0.00172354654, 19.2406],\
[11, 'S', 0.00293579767, 2.8992], [11, 'S', 0.00339427896, 0.6534], [11, 'S', 0.000364705439, 0.1776],\
[12, 'S', -0.00587728837, 19.2406], [12, 'S', -0.0100110609, 2.8992], [12, 'S', -0.0115744807, 0.6534],\
[12, 'S', -0.00840107361, 0.1776], [13, 'S', 0.000115874039, 5909.0], [13, 'S', 0.000213758615, 887.5],\
[13, 'S', 0.00034516249, 204.7], [13, 'S', 0.000467713583, 59.84], [13, 'S', 0.000487449032, 20.0],\
[13, 'S', 0.000283360817, 7.193], [13, 'S', 4.41370722e-05, 2.686], [13, 'S', 0.000129872941, 7.193],\
[13, 'S', -0.000149284829, 0.7], [13, 'S', -0.000476787014, 0.2133], [13, 'X', 0.0269341081, 26.79],\
[13, 'X', 0.0262332766, 5.956], [13, 'X', 0.0184303397, 1.707], [13, 'X', 0.00699784902, 0.5314],\
[13, 'Y', 0.0519580248, 26.79], [13, 'Y', 0.0506060655, 5.956], [13, 'Y', 0.0355535831, 1.707],\
[13, 'Y', 0.0134994042, 0.5314], [13, 'Z', -0.0383878092, 26.79], [13, 'Z', -0.0373889499, 5.956],\
[13, 'Z', -0.0262678223, 1.707], [13, 'Z', -0.00997367694, 0.5314], [13, 'X', -2.67012554e-05, 0.1654],\
[13, 'Y', 0.000904200797, 0.1654], [13, 'Z', 5.09914983e-05, 0.1654], [14, 'S', -0.00912908749, 19.2406],\
[14, 'S', -0.0155500029, 2.8992], [14, 'S', -0.017978435, 0.6534], [14, 'S', -0.0120460603, 0.1776],\
[15, 'S', 0.0016055472, 19.2406], [15, 'S', 0.00273480385, 2.8992], [15, 'S', 0.00316189609, 0.6534],\
[15, 'S', 0.00209218298, 0.1776], [16, 'S', 0.00056041656, 5909.0], [16, 'S', 0.00103382836, 887.5],\
[16, 'S', 0.00166935387, 204.7], [16, 'S', 0.00226206353, 59.84], [16, 'S', 0.00235751262, 20.0],\
[16, 'S', 0.00137045446, 7.193], [16, 'S', 0.000213465815, 2.686], [16, 'S', 0.00123513498, 7.193],\
[16, 'S', -0.00141974851, 0.7], [16, 'S', -0.00113528459, 0.2133], [16, 'X', 0.0251517247, 26.79],\
[16, 'X', 0.0244972713, 5.956], [16, 'X', 0.0172106992, 1.707], [16, 'X', 0.00653476148, 0.5314],\
[16, 'Y', 0.0249511719, 26.79], [16, 'Y', 0.0243019368, 5.956], [16, 'Y', 0.0170734658, 1.707],\
[16, 'Y', 0.00648265511, 0.5314], [16, 'Z', 0.0387229602, 26.79], [16, 'Z', 0.0377153802, 5.956],\
[16, 'Z', 0.0264971578, 1.707], [16, 'Z', 0.0100607537, 0.5314], [16, 'X', -0.000516896597, 0.1654],\
[16, 'Y', 0.000704840718, 0.1654], [16, 'Z', -0.000725627747, 0.1654], [17, 'S', 0.00856706598, 19.2406],\
[17, 'S', 0.0145926853, 2.8992], [17, 'S', 0.0168716139, 0.6534], [17, 'S', 0.0131577385, 0.1776],\
[18, 'S', -0.00424743898, 19.2406], [18, 'S', -0.00723486203, 2.8992], [18, 'S', -0.00836472495, 0.6534],\
[18, 'S', -0.00641875413, 0.1776], [19, 'S', -0.00404193473, 19.2406], [19, 'S', -0.006884817, 2.8992],\
[19, 'S', -0.00796001365, 0.6534], [19, 'S', -0.0048262046, 0.1776], [20, 'S', 0.00765131699, 19.2406],\
[20, 'S', 0.0130328471, 2.8992], [20, 'S', 0.0150681769, 0.6534], [20, 'S', 0.0107127476, 0.1776],\
[21, 'S', 0.00394124012, 19.2406], [21, 'S', 0.00671329916, 2.8992], [21, 'S', 0.00776171, 0.6534],\
[21, 'S', 0.00637671339, 0.1776], [22, 'S', -0.0080669874, 19.2406], [22, 'S', -0.013740878, 2.8992],\
[22, 'S', -0.0158867805, 0.6534], [22, 'S', -0.0112089415, 0.1776], [23, 'S', -0.00703771987, 19.2406],\
[23, 'S', -0.0119876783, 2.8992], [23, 'S', -0.013859785, 0.6534], [23, 'S', -0.00968297661, 0.1776],\
[24, 'S', 0.00557469287, 19.2406], [24, 'S', 0.00949563585, 2.8992], [24, 'S', 0.0109785621, 0.6534],\
[24, 'S', 0.00747695987, 0.1776]]
RuNH3_6_37_data = [[0, 'S', 6.46572809e-05, 1.508], [0, 'S', 7.33062301e-05, 0.5129], [0, 'S', -7.86944994e-05, 0.1362],\
[0, 'S', -0.000705332604, 2.565], [0, 'S', 0.000604484705, 1.508], [0, 'S', 0.00011349444, 0.5129],\
[0, 'S', -5.48831361e-05, 0.0417], [0, 'X', -0.00067295965, 4.859], [0, 'X', 0.000939291866, 1.219],\
[0, 'X', 0.000130133929, 0.4413], [0, 'Y', 0.000629472821, 4.859], [0, 'Y', -0.000878594579, 1.219],\
[0, 'Y', -0.00012172464, 0.4413], [0, 'Z', 0.000910094893, 4.859], [0, 'Z', -0.00127027635, 1.219],\
[0, 'Z', -0.000175990082, 0.4413], [0, 'X', -0.000236679238, 0.5725], [0, 'X', 0.000247195383, 0.083],\
[0, 'Y', -0.000540292625, 0.5725], [0, 'Y', 0.000564298937, 0.083], [0, 'Z', 0.000564723999, 0.5725],\
[0, 'Z', -0.000589815847, 0.083], [0, 'X', -1.10741654e-05, 0.025], [0, 'Y', -2.85595029e-05, 0.025],\
[0, 'Z', 3.006976e-05, 0.025], [0, 'XX', 0.631922415, 4.195], [0, 'XX', 0.94544855, 1.377],\
[0, 'XX', 0.169526751, 0.4828], [0, 'YY', -0.328395499, 4.195], [0, 'YY', -0.491327797, 1.377],\
[0, 'YY', -0.0880991412, 0.4828], [0, 'ZZ', -0.303526916, 4.195], [0, 'ZZ', -0.454120753, 1.377],\
[0, 'ZZ', -0.0814276102, 0.4828], [0, 'XY', -0.799007062, 4.195], [0, 'XY', -1.19543167, 1.377],\
[0, 'XY', -0.214350794, 0.4828], [0, 'XZ', -0.201426236, 4.195], [0, 'XZ', -0.301363172, 1.377],\
[0, 'XZ', -0.0540369113, 0.4828], [0, 'YZ', 0.42420367, 4.195], [0, 'YZ', 0.634670864, 1.377],\
[0, 'YZ', 0.11380174, 0.4828], [0, 'XX', 0.0140554236, 0.1501], [0, 'YY', -0.0072542984, 0.1501],\
[0, 'ZZ', -0.00680112517, 0.1501], [0, 'XY', -0.017662346, 0.1501], [0, 'XZ', -0.00442890466, 0.1501],\
[0, 'YZ', 0.00925994787, 0.1501], [1, 'S', -0.000144334932, 5909.0], [1, 'S', -0.000266261843, 887.5],\
[1, 'S', -0.000429941038, 204.7], [1, 'S', -0.000582593037, 59.84], [1, 'S', -0.000607175891, 20.0],\
[1, 'S', -0.000352959684, 7.193], [1, 'S', -5.49779862e-05, 2.686], [1, 'S', -0.000458434382, 7.193],\
[1, 'S', 0.000526955788, 0.7], [1, 'S', 0.000175791862, 0.2133], [1, 'X', -0.0348575911, 26.79],\
[1, 'X', -0.0339505889, 5.956], [1, 'X', -0.023852182, 1.707], [1, 'X', -0.00905647809, 0.5314],\
[1, 'Y', 0.041745699, 26.79], [1, 'Y', 0.0406594666, 5.956], [1, 'Y', 0.0285655427, 1.707],\
[1, 'Y', 0.0108461026, 0.5314], [1, 'Z', -0.0194515659, 26.79], [1, 'Z', -0.0189454318, 5.956],\
[1, 'Z', -0.0133102224, 1.707], [1, 'Z', -0.00505378239, 0.5314], [1, 'X', 0.000364263504, 0.1654],\
[1, 'Y', -0.000219696323, 0.1654], [1, 'Z', -0.000642960835, 0.1654], [2, 'S', -0.00472926304, 19.2406],\
[2, 'S', -0.00805557555, 2.8992], [2, 'S', -0.00931360867, 0.6534], [2, 'S', -0.00755418466, 0.1776],\
[3, 'S', 0.00921480538, 19.2406], [3, 'S', 0.0156960102, 2.8992], [3, 'S', 0.0181472442, 0.6534],\
[3, 'S', 0.012974182, 0.1776], [4, 'S', -1.64347564e-05, 5909.0], [4, 'S', -3.03180143e-05, 887.5],\
[4, 'S', -4.89554132e-05, 204.7], [4, 'S', -6.63371959e-05, 59.84], [4, 'S', -6.91363327e-05, 20.0],\
[4, 'S', -4.01898997e-05, 7.193], [4, 'S', -6.26009102e-06, 2.686], [4, 'S', -5.20037054e-05, 7.193],\
[4, 'S', 5.97766106e-05, 0.7], [4, 'S', -2.91336866e-05, 0.2133], [4, 'X', -0.0356018561, 26.79],\
[4, 'X', -0.0346754879, 5.956], [4, 'X', -0.024361464, 1.707], [4, 'X', -0.00924984829, 0.5314],\
[4, 'Y', 0.0194700911, 26.79], [4, 'Y', 0.018963475, 5.956], [4, 'Y', 0.0133228987, 1.707],\
[4, 'Y', 0.00505859549, 0.5314], [4, 'Z', 0.0298093313, 26.79], [4, 'Z', 0.0290336859, 5.956],\
[4, 'Z', 0.0203977834, 1.707], [4, 'Z', 0.00774487125, 0.5314], [4, 'X', -0.00181624823, 0.1654],\
[4, 'Y', 0.000367426852, 0.1654], [4, 'Z', 0.00140796861, 0.1654], [5, 'S', 0.00289171111, 19.2406],\
[5, 'S', 0.00492558716, 2.8992], [5, 'S', 0.00569481236, 0.6534], [5, 'S', 0.00293981612, 0.1776],\
[6, 'S', -0.00565730622, 19.2406], [6, 'S', -0.00963635504, 2.8992], [6, 'S', -0.0111412573, 0.6534],\
[6, 'S', -0.00601759579, 0.1776], [7, 'S', -0.000210847953, 5909.0], [7, 'S', -0.000388961728, 887.5],\
[7, 'S', -0.000628068248, 204.7], [7, 'S', -0.000851065974, 59.84], [7, 'S', -0.000886977201, 20.0],\
[7, 'S', -0.000515612028, 7.193], [7, 'S', -8.03131695e-05, 2.686], [7, 'S', -0.000492953053, 7.193],\
[7, 'S', 0.000566633906, 0.7], [7, 'S', 0.000291422681, 0.2133], [7, 'X', 0.0493648046, 26.79],\
[7, 'X', 0.0480803214, 5.956], [7, 'X', 0.033779107, 1.707], [7, 'X', 0.0128256502, 0.5314],\
[7, 'Y', 0.0125337243, 26.79], [7, 'Y', 0.012207594, 5.956], [7, 'Y', 0.00857651553, 1.707],\
[7, 'Y', 0.00325643269, 0.5314], [7, 'Z', -0.0185032854, 26.79], [7, 'Z', -0.0180218258, 5.956],\
[7, 'Z', -0.0126613376, 1.707], [7, 'Z', -0.00480740616, 0.5314], [7, 'X', -0.000499509519, 0.1654],\
[7, 'Y', -0.000173911673, 0.1654], [7, 'Z', 0.00047173766, 0.1654], [8, 'S', -0.00538615124, 19.2406],\
[8, 'S', -0.00917448403, 2.8992], [8, 'S', -0.0106072562, 0.6534], [8, 'S', -0.00807752979, 0.1776],\
[9, 'S', 0.00881402738, 19.2406], [9, 'S', 0.0150133462, 2.8992], [9, 'S', 0.0173579691, 0.6534],\
[9, 'S', 0.0129760346, 0.1776], [10, 'S', 0.000211422777, 5909.0], [10, 'S', 0.000390022135, 887.5],\
[10, 'S', 0.000629780518, 204.7], [10, 'S', 0.000853386191, 59.84], [10, 'S', 0.000889395322, 20.0],\
[10, 'S', 0.000517017715, 7.193], [10, 'S', 8.05321232e-05, 2.686], [10, 'S', 0.000563489539, 7.193],\
[10, 'S', -0.00064771336, 0.7], [10, 'S', -0.000133561341, 0.2133], [10, 'X', -0.0549054694, 26.79],\
[10, 'X', -0.0534768168, 5.956], [10, 'X', -0.037570446, 1.707], [10, 'X', -0.0142651906, 0.5314],\
[10, 'Y', -0.0225286335, 26.79], [10, 'Y', -0.0219424334, 5.956], [10, 'Y', -0.0154157831, 1.707],\
[10, 'Y', -0.00585324655, 0.5314], [10, 'Z', 0.0241438107, 26.79], [10, 'Z', 0.0235155834, 5.956],\
[10, 'Z', 0.0165210087, 1.707], [10, 'Z', 0.00627289166, 0.5314], [10, 'X', -0.00083675402, 0.1654],\
[10, 'Y', -0.000482939566, 0.1654], [10, 'Z', 0.0014981449, 0.1654], [11, 'S', 0.00348617662, 19.2406],\
[11, 'S', 0.00593816816, 2.8992], [11, 'S', 0.00686552737, 0.6534], [11, 'S', 0.0038172112, 0.1776],\
[12, 'S', 0.00515560836, 19.2406], [12, 'S', 0.00878178954, 2.8992], [12, 'S', 0.010153235, 0.6534],\
[12, 'S', 0.00750694743, 0.1776], [13, 'S', -5.49524391e-05, 5909.0], [13, 'S', -0.000101373503, 887.5],\
[13, 'S', -0.000163690857, 204.7], [13, 'S', -0.000221809841, 59.84], [13, 'S', -0.000231169237, 20.0],\
[13, 'S', -0.000134381852, 7.193], [13, 'S', -2.09316927e-05, 2.686], [13, 'S', -0.000245504263, 7.193],\
[13, 'S', 0.000282199367, 0.7], [13, 'S', -8.95241297e-05, 0.2133], [13, 'X', 0.0296979085, 26.79],\
[13, 'X', 0.0289251623, 5.956], [13, 'X', 0.0203215396, 1.707], [13, 'X', 0.00771592209, 0.5314],\
[13, 'Y', -0.0229310905, 26.79], [13, 'Y', -0.0223344184, 5.956], [13, 'Y', -0.0156911744, 1.707],\
[13, 'Y', -0.00595781038, 0.5314], [13, 'Z', -0.0240680915, 26.79], [13, 'Z', -0.0234418344, 5.956],\
[13, 'Z', -0.0164691959, 1.707], [13, 'Z', -0.00625321876, 0.5314], [13, 'X', 0.000248203201, 0.1654],\
[13, 'Y', -0.00195105995, 0.1654], [13, 'Z', 4.95942547e-05, 0.1654], [14, 'S', -6.3651167e-05, 19.2406],\
[14, 'S', -0.000108420018, 2.8992], [14, 'S', -0.00012535189, 0.6534], [14, 'S', -0.00142203548, 0.1776],\
[15, 'S', -0.0050424763, 19.2406], [15, 'S', -0.0085890864, 2.8992], [15, 'S', -0.0099304375, 0.6534],\
[15, 'S', -0.00595550103, 0.1776], [16, 'S', 0.000175383692, 5909.0], [16, 'S', 0.000323539038, 887.5],\
[16, 'S', 0.000522428254, 204.7], [16, 'S', 0.000707918148, 59.84], [16, 'S', 0.000737789169, 20.0],\
[16, 'S', 0.000428886976, 7.193], [16, 'S', 6.68046333e-05, 2.686], [16, 'S', 0.000476517161, 7.193],\
[16, 'S', -0.000547741369, 0.7], [16, 'S', -0.000331187193, 0.2133], [16, 'X', 0.0423760874, 26.79],\
[16, 'X', 0.0412734522, 5.956], [16, 'X', 0.0289969018, 1.707], [16, 'X', 0.0110098861, 0.5314],\
[16, 'Y', -0.0526163557, 26.79], [16, 'Y', -0.0512472664, 5.956], [16, 'Y', -0.0360040625, 1.707],\
[16, 'Y', -0.0136704476, 0.5314], [16, 'Z', 0.0217360893, 26.79], [16, 'Z', 0.0211705114, 5.956],\
[16, 'Z', 0.0148734648, 1.707], [16, 'Z', 0.00564733276, 0.5314], [16, 'X', 0.0015961123, 0.1654],\
[16, 'Y', -0.000999996156, 0.1654], [16, 'Z', 0.000321058399, 0.1654], [17, 'S', 0.00241879545, 19.2406],\
[17, 'S', 0.00412004775, 2.8992], [17, 'S', 0.00476347248, 0.6534], [17, 'S', 0.00311018499, 0.1776],\
[18, 'S', -0.0088536234, 19.2406], [18, 'S', -0.015080792, 2.8992], [18, 'S', -0.0174359479, 0.6534],\
[18, 'S', -0.0104878557, 0.1776], [19, 'S', 0.00653506894, 19.2406], [19, 'S', 0.0111314894, 2.8992],\
[19, 'S', 0.0128698857, 0.6534], [19, 'S', 0.00880857322, 0.1776], [20, 'S', 0.00514585371, 19.2406],\
[20, 'S', 0.00876517399, 2.8992], [20, 'S', 0.0101340246, 0.6534], [20, 'S', 0.00725308796, 0.1776],\
[21, 'S', -0.0085865535, 19.2406], [21, 'S', -0.0146258793, 2.8992], [21, 'S', -0.0169099918, 0.6534],\
[21, 'S', -0.0110080324, 0.1776], [22, 'S', -0.00458878599, 19.2406], [22, 'S', -0.00781629442, 2.8992],\
[22, 'S', -0.00903695918, 0.6534], [22, 'S', -0.00673431556, 0.1776], [23, 'S', 0.00282027644, 19.2406],\
[23, 'S', 0.00480390914, 2.8992], [23, 'S', 0.00555413198, 0.6534], [23, 'S', 0.00344027048, 0.1776],\
[24, 'S', -0.00359574202, 19.2406], [24, 'S', -0.00612479603, 2.8992], [24, 'S', -0.00708130078, 0.6534],\
[24, 'S', -0.00539612557, 0.1776]]
RuNH36_35 = get_mol_orbital(RuNH3_6_35_data, RuNH3_6_centers)
RuNH36_36 = get_mol_orbital(RuNH3_6_36_data, RuNH3_6_centers)
RuNH36_37 = get_mol_orbital(RuNH3_6_37_data, RuNH3_6_centers)
return [RuNH36_35, RuNH36_36, RuNH36_37]
else:
pass

View File

@@ -0,0 +1,52 @@
from __future__ import annotations
import numpy as np
from pathlib import Path
from nptyping import NDArray, Shape, Number
class ACF:
def __init__(self,
coords: NDArray[Shape['Natoms, 3'], Number],
nelec_per_atom: NDArray[Shape['Natoms'], Number],
spin_per_atom: NDArray[Shape['Natoms'], Number] = None):
self.coords = coords
self.nelec_per_atom = nelec_per_atom
self.nelec_per_isolated_atom = None
self.spin_per_atom = spin_per_atom
@staticmethod
def from_file(filepath: str | Path):
if isinstance(filepath, str):
filepath = Path(filepath)
file = open(filepath)
file.readline()
line = file.readline()
file.close()
ncols = len(line.split('+'))
if '+' in line:
if ncols == 7:
data = np.genfromtxt(filepath, skip_header=2, skip_footer=5, delimiter='|')
return ACF(data[:, 1:4], data[:, 4])
elif ncols == 8:
data = np.genfromtxt(filepath, skip_header=2, skip_footer=7, delimiter='|')
return ACF(data[:, 1:4], data[:, 4], data[:, 5])
else:
raise IOError(f'Can parse ACF.dat with 7 or 8 columns, but {ncols=} was given')
else:
data = np.genfromtxt(filepath, skip_header=2, skip_footer=4)
return ACF(data[:, 1:4], data[:, 4])
def get_charge(self,
nelec_per_isolated_atom: NDArray[Shape['Natoms'], Number] | None = None):
if nelec_per_isolated_atom is not None:
return nelec_per_isolated_atom - self.nelec_per_atom
else:
if self.nelec_per_isolated_atom is not None:
return self.nelec_per_isolated_atom - self.nelec_per_atom
else:
raise ValueError('nelec_per_isolated_atom should be defined either as argument of '
'this function or as self.nelec_per_isolated_atom')
def get_delta_elec(self,
nelec_per_isolated_atom: NDArray[Shape['Natoms'], Number]):
return self.nelec_per_atom - nelec_per_isolated_atom
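# --- Editorial usage sketch (not part of the original module) ---
# A minimal illustration of how this parser could turn Bader populations from
# ACF.dat into net atomic charges. The file name and the choice of 6 valence
# electrons per isolated atom (e.g. an all-carbon system) are assumptions.
def _example_acf_usage(filepath: str = 'ACF.dat') -> None:
    acf = ACF.from_file(filepath)                      # parse the Bader output
    n_iso = np.full(len(acf.nelec_per_atom), 6.0)      # assumed valence electrons of the isolated atoms
    print('Net charges:', acf.get_charge(n_iso))       # positive value = electron depletion
    print('Delta n_elec:', acf.get_delta_elec(n_iso))  # electrons gained (+) or lost (-) by each atom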

View File

@@ -0,0 +1,233 @@
from matplotlib import colors
import numpy as np
from monty.re import regrep
from echem.core.structure import Structure
from nptyping import NDArray, Shape, Number
from dataclasses import dataclass
from echem.core.constants import ElemNum2Name, Bohr2Angstrom
@dataclass()
class LocalMultipoleMoments:
net_charges: NDArray[Shape['Natoms'], Number]
dipoles: NDArray[Shape['Natoms, 4'], Number]
quadrupoles: NDArray[Shape['Natoms, 8'], Number]
class Output_DDEC:
def __init__(self,
structure: Structure,
lmm_hirshfeld: LocalMultipoleMoments,
lmm_ddec: LocalMultipoleMoments,
charges_cm5: NDArray[Shape['Natoms'], Number]):
self.structure = structure
self.lmm_hirshfeld = lmm_hirshfeld
self.lmm_ddec = lmm_ddec
self.charges_cm5 = charges_cm5
@staticmethod
def _process_lmm_(data, line_number, natoms):
charges_ddec = np.zeros(natoms)
dipoles_ddec = np.zeros((natoms, 4))
quadrupoles_ddec = np.zeros((natoms, 8))
idx = 0
while len(line := data[line_number].split()) != 0:
charges_ddec[idx] = float(line[5])
dipoles_ddec[idx] = list(map(float, line[6: 10]))
quadrupoles_ddec[idx] = list(map(float, line[10:]))
line_number += 1
idx += 1
return LocalMultipoleMoments(charges_ddec, dipoles_ddec, quadrupoles_ddec)
@staticmethod
def from_file(filepath):
file = open(filepath, 'r')
data = file.readlines()
file.close()
patterns = {'lattice': r' parameters',
'lmm': r'Multipole analysis for each of the expansion sites.',
'cm5': r'The computed CM5 net atomic charges are:'}
matches = regrep(filepath, patterns)
lattice = np.zeros((3, 3))
i = matches['lattice'][0][1]
natoms = int((data[i + 1].split()[0]).split('.')[0])
line = data[i + 2].split()
NX = int(line[0].split('.')[0])
lattice[0] = np.array([float(line[1]), float(line[2]), float(line[3])])
line = data[i + 3].split()
NY = int(line[0].split('.')[0])
lattice[1] = np.array([float(line[1]), float(line[2]), float(line[3])])
line = data[i + 4].split()
NZ = int(line[0].split('.')[0])
lattice[2] = np.array([float(line[1]), float(line[2]), float(line[3])])
if NX > 0 and NY > 0 and NZ > 0:
units = 'Bohr'
elif NX < 0 and NY < 0 and NZ < 0:
units = 'Angstrom'
else:
raise ValueError('The sign of the number of all voxels should be > 0 or < 0')
if units == 'Angstrom':
NX, NY, NZ = -NX, -NY, -NZ
lattice = lattice * np.array([NX, NY, NZ]).reshape((-1, 1)) * Bohr2Angstrom
coords = np.zeros((natoms, 3))
species = []
line_number = matches['lmm'][0][1] + 3
idx = 0
while len(line := data[line_number].split()) != 0:
species.append(ElemNum2Name[int(line[1])])
coords[idx] = list(map(float, line[2:5]))
line_number += 1
idx += 1
structure = Structure(lattice, species, coords)
line_number = matches['lmm'][0][1] + 3
lmm_hirshfeld = Output_DDEC._process_lmm_(data, line_number, natoms)
line_number = matches['lmm'][1][1] + 3
lmm_ddec = Output_DDEC._process_lmm_(data, line_number, natoms)
line_number = matches['cm5'][0][1] + 1
charges_cm5 = []
i = 0
while i < natoms:
charges = list(map(float, data[line_number].split()))
charges_cm5 += charges
line_number += 1
i += len(charges)
return Output_DDEC(structure, lmm_hirshfeld, lmm_ddec, np.array(charges_cm5))
class AtomicNetCharges:
"""Class that operates with DDEC output file DDEC6_even_tempered_net_atomic_charges.xyz"""
def __init__(self, structure: Structure, net_charges, dipoles_xyz=None,
dipoles_mag=None, Qs=None, quadrupole_tensor_eigs=None, date=None):
"""
Create an AtomicNetCharges object.
Args:
structure (Structure class): a base class that contains lattice, coords and species information
net_charges: DDEC6 net atomic charges (one value per atom)
dipoles_xyz: atomic dipole components (x, y, z)
dipoles_mag: atomic dipole magnitudes
Qs: the five traceless atomic quadrupole components
quadrupole_tensor_eigs: eigenvalues of the atomic quadrupole tensor
"""
self.structure = structure
self.net_charges = net_charges
self.dipoles_xyz = dipoles_xyz
self.dipoles_mag = dipoles_mag
self.Qs = Qs
self.quadrupole_tensor_eigs = quadrupole_tensor_eigs
self.date = date
@staticmethod
def from_file(filepath):
"""
Read the positions of atoms and their charges
from file "DDEC6_even_tempered_net_atomic_charges.xyz"
Parameters:
----------
filepath: str
Path to file with atomic charges
Returns:
-------
AtomicNetCharges class instance
"""
file = open(filepath, 'r')
data = file.readlines()
file.close()
patterns = {'date': r'\s+(\d\d\d\d/\d\d/\d\d\s+\d\d:\d\d:\d\d)'}
matches = regrep(filepath, patterns)
date = matches['date'][0][0][0]
natoms = int(data[0])
x_axis = data[1].split()[10:13]
y_axis = data[1].split()[15:18]
z_axis = data[1].split()[20:23]
lattice = np.array([x_axis, y_axis, z_axis], dtype=np.float32)
for start_line, string in enumerate(data):
if 'The following XYZ coordinates are in angstroms' in string:
break
coords = np.zeros((natoms, 3))
species = []
net_charges = np.zeros(natoms)
dipoles_xyz = np.zeros((natoms, 3))
dipoles_mag = np.zeros(natoms)
Qs = np.zeros((natoms, 5))
quadrupole_tensor_eigs = np.zeros((natoms, 3))
for i, j in enumerate(range(start_line + 2, start_line + 2 + natoms)):
line_splitted = data[j].split()
species.append(line_splitted[1])
coords[i] = line_splitted[2:5]
net_charges[i] = line_splitted[5]
dipoles_xyz[i] = line_splitted[6:9]
dipoles_mag[i] = line_splitted[9]
Qs[i] = line_splitted[10:15]
quadrupole_tensor_eigs[i] = line_splitted[15:18]
structure = Structure(lattice, species, coords, coords_are_cartesian=True)
return AtomicNetCharges(structure, net_charges, dipoles_xyz, dipoles_mag, Qs, quadrupole_tensor_eigs, date)
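# --- Editorial usage sketch (not part of the original module) ---
# Minimal illustration of reading DDEC6_even_tempered_net_atomic_charges.xyz and
# summing the net charge over a subset of atoms; the file path and the 0-based
# atom indices of the adsorbate are assumptions.
def _example_net_charges_usage(filepath: str = 'DDEC6_even_tempered_net_atomic_charges.xyz') -> float:
    anc = AtomicNetCharges.from_file(filepath)
    adsorbate_indices = [0, 1, 2]                                    # assumed indices of the adsorbate atoms
    total_charge = float(np.sum(anc.net_charges[adsorbate_indices]))
    print(f'Adsorbate net charge: {total_charge:.3f} e')
    return total_charge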
class AtomicSpinMoments:
"""Class that operates with DDEC output file DDEC6_even_tempered_atomic_spin_moments.xyz"""
def __init__(self, structure: Structure, spin_moments, date):
self.structure = structure
self.spin_moments = spin_moments
self.date = date
@staticmethod
def from_file(filepath):
file = open(filepath, 'r')
data = file.readlines()
file.close()
patterns = {'date': r'\s+(\d\d\d\d/\d\d/\d\d\s+\d\d:\d\d:\d\d)'}
matches = regrep(filepath, patterns)
date = matches['date'][0][0][0]
natoms = int(data[0])
x_axis = data[1].split()[10:13]
y_axis = data[1].split()[15:18]
z_axis = data[1].split()[20:23]
lattice = np.array([x_axis, y_axis, z_axis], dtype=np.float32)
coords = np.zeros((natoms, 3))
species = []
spin_moments = np.zeros(natoms)
for i, j in enumerate(range(2, 2 + natoms)):
line_splitted = data[j].split()
species += [line_splitted[0]]
coords[i] = line_splitted[1:4]
spin_moments[i] = line_splitted[4]
structure = Structure(lattice, species, coords, coords_are_cartesian=True)
return AtomicSpinMoments(structure, spin_moments, date)
class MidpointNormalize(colors.Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
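# --- Editorial usage sketch (not part of the original module) ---
# Minimal illustration of MidpointNormalize: pin the colormap midpoint to zero so
# negative and positive values get distinct halves of a diverging colormap even for
# an asymmetric data range. The demo data and the matplotlib import are assumptions.
def _example_midpoint_normalize() -> None:
    import matplotlib.pyplot as plt
    data = np.linspace(-0.2, 1.0, 100).reshape(10, 10)               # asymmetric range around zero
    norm = MidpointNormalize(vmin=data.min(), vmax=data.max(), midpoint=0.0)
    plt.imshow(data, cmap='seismic', norm=norm)
    plt.colorbar()
    plt.show()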

View File

@@ -0,0 +1,114 @@
import numpy as np
from typing import Union, List, Iterable
from monty.re import regrep
import re
from echem.io_data.ddec import AtomicNetCharges
class GasSensor:
def __init__(self):
pass
@staticmethod
def read_OUTCAR(filepath) -> float:
"""
Reads an OUTCAR file and returns the final total energy (TOTEN) of the last ionic step, in eV.
"""
file = open(filepath, 'r')
data = file.readlines()
file.close()
patterns = {'energy_ionic': r'free\s+energy\s+TOTEN\s+=\s+([-+]?\d+\.\d+)\s+eV'}
matches = regrep(filepath, patterns)
end_energy = np.array([float(i[0][0]) for i in matches['energy_ionic']])
return end_energy[-1]
@staticmethod
def sort_DDEC_output(filepath, k: int) -> list[int]:
"""
Sorts the atoms by z coordinate (descending) and returns the ordinal numbers of the k topmost atoms,
i.e. the atoms belonging to the adsorbed molecule.
k - number of atoms in your molecule
"""
z_coords = {}
idx = 1
with open(filepath, 'r') as file:
while True:
line = file.readline()
if re.search(r'\sChargemol', line) is not None:
break
list_of_substrings = line.split(' ')
if re.search(r'^\s|^j', list_of_substrings[0]) is None:
z_coords[idx] = float(list_of_substrings[-2])
idx += 1
continue
sorted_z_coords = dict(sorted(z_coords.items(), key=lambda item: item[1], reverse=True))
result = []
counter = 0
for item in sorted_z_coords:
if counter < k:
result.append(item)
counter += 1
else:
break
return result
@staticmethod
def get_chrg_mol(filepath, k: Union[int, list[int]]) -> float:
"""
Returns the net charge of the molecule (the sum of the net atomic charges of its atoms).
filepath - DDEC6 output file: DDEC6_even_tempered_net_atomic_charges.xyz
k - the number of atoms in the molecule; alternatively, a list with the ordinal (1-based) numbers of the atoms in your molecule.
"""
atomic_charges = AtomicNetCharges.from_file(filepath)
net_charges = atomic_charges.net_charges
if isinstance(k, int):
chrg_molecule = 0
targets = GasSensor.sort_DDEC_output(filepath, k)
for i in targets:
chrg_molecule += net_charges[i - 1]
return chrg_molecule
elif isinstance(k, list):
targets = k
chrg_molecule = 0
for i in targets:
chrg_molecule += net_charges[i - 1]
return chrg_molecule
@staticmethod
def get_Ead(filepath, E_surface, E_molecule) -> float:
"""
Returns the adsorption energy E_ad = E_system - E_surface - E_molecule (see the usage sketch at the end of this module).
filepath - your OUTCAR file obtained as a result of the VASP optimization; it provides E_system.
"""
E_system = GasSensor.read_OUTCAR(filepath)
E_ad = E_system - E_surface - E_molecule
return E_ad
@staticmethod
def get_energy_in_meV(filepath):
"""
Converts energies from eV to meV.
filepath - your .txt file containing energies in eV.
"""
X = np.genfromtxt(filepath)
X_new = X * 1000
return f'Your energies in meV: {X_new}'
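# --- Editorial usage sketch (not part of the original module) ---
# Minimal end-to-end illustration: total energy from OUTCAR, adsorption energy and
# the charge of a 3-atom adsorbate from the DDEC6 charges file. All file paths and
# the reference energies E_surface / E_molecule are assumptions.
def _example_gas_sensor_workflow() -> None:
    e_system = GasSensor.read_OUTCAR('OUTCAR')                       # eV, last ionic step
    e_ad = GasSensor.get_Ead('OUTCAR', E_surface=-350.12, E_molecule=-14.22)
    q_mol = GasSensor.get_chrg_mol('DDEC6_even_tempered_net_atomic_charges.xyz', k=3)
    print(f'E_total = {e_system:.3f} eV, E_ad = {e_ad:.3f} eV, q_mol = {q_mol:.3f} e')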

View File

@@ -0,0 +1,227 @@
import os
import re
import shutil
from typing import Callable
from echem.io_data.jdftx import Ionpos, Lattice
from echem.io_data.vasp import Poscar
from echem.core.structure import Structure
from echem.core.constants import THz2eV
from echem.core.thermal_properties import ThermalProperties
from InterPhon.core import PreProcess, PostProcess
from nptyping import NDArray, Shape, Number
from typing import Union
from pathlib import Path
class InterPhonInterface(ThermalProperties):
def __init__(self,
folder_to_jdftx_files: Union[str, Path],
folder_files_to_copy: Union[str, Path] = None,
select_fun: Callable[[Structure], list[list[str]]] = None,
user_args: dict = None,
sym_flag: bool = True):
if isinstance(folder_to_jdftx_files, str):
folder_to_jdftx_files = Path(folder_to_jdftx_files)
if isinstance(folder_files_to_copy, str):
folder_files_to_copy = Path(folder_files_to_copy)
self.folder_to_jdftx_files = folder_to_jdftx_files
self.folder_files_to_copy = folder_files_to_copy
self.select_fun = select_fun
self.user_args = user_args
self.sym_flag = sym_flag
self.post_process = None
self.eigen_freq = None
self.weights = None
def _create_poscar_for_interphon(self,
folder_to_jdftx_files: Path,
select_fun: Callable[[Structure], list[list[str]]] = None) -> None:
"""
Function creates POSCAR with unitcell for InterPhon adding selective dynamics data
Args:
folder_to_jdftx_files (str): path to folder with jdft.ionpos and jdft.lattice files
select_fun (Callable, optional): function that take Structure as input and provides list with
selective dynamics data for POSCAR class. All atoms are allowed to move in default.
"""
ionpos = Ionpos.from_file(folder_to_jdftx_files / 'jdft.ionpos')
lattice = Lattice.from_file(folder_to_jdftx_files / 'jdft.lattice')
poscar = ionpos.convert('vasp', lattice)
if select_fun is not None:
sd_data = select_fun(poscar.structure)
else:
sd_data = [['T', 'T', 'T'] for _ in range(poscar.structure.natoms)]
poscar.sdynamics_data = sd_data
poscar.to_file(folder_to_jdftx_files / 'POSCAR_unitcell_InterPhon')
def _make_preprocess(self,
poscar_unitcell: Path,
folder_to_disps: Path,
folder_files_to_copy: Path = None,
user_args: dict = None,
sym_flag: bool = True) -> None:
"""
Function creates folders with POSCARs with displaced atoms and all other necessary files for calculation
Args:
poscar_unitcell (str): path to the POSCAR file that contains the unitcell for InterPhon
with defined sd dynamics
folder_to_disps (str): path to a folder where all new folders with corresponding POSCARs with
displaced atoms will be created
folder_files_to_copy (str, optional): path to a folder from which all files will be copied to each
new folder with new POSCARs
user_args (dict, optional): dict with all necessary information for the InterPhon PreProcess class.
Only 2D periodicity is supported. If you want to switch off symmetries, you have to define 'periodicity'
in user_args and set sym_flag=False
Example and default value: user_args = {'dft_code': 'vasp', 'displacement': 0.05,
'enlargement': "1 1 1", 'periodicity': "1 1 0"}
sym_flag (bool, optional): if True the symmetry will be applied. Only 2D symmetries are supported
"""
if user_args is None:
user_args = {'dft_code': 'vasp',
'displacement': 0.05,
'enlargement': '1 1 1',
'periodicity': '1 1 0'}
if poscar_unitcell != folder_to_disps / 'POSCAR_unitcell_InterPhon':
shutil.copyfile(poscar_unitcell, folder_to_disps / 'POSCAR_unitcell_InterPhon')
pre_process = PreProcess()
pre_process.set_user_arg(user_args)
pre_process.set_unit_cell(in_file=str(poscar_unitcell),
code_name='vasp')
pre_process.set_super_cell(out_file=str(folder_to_disps / 'POSCAR_supercell_InterPhon'),
code_name='vasp')
pre_process.write_displace_cell(out_file=str(folder_to_disps / 'POSCAR'),
code_name='vasp',
sym_flag=sym_flag)
poscars_disp = [f for f in folder_to_disps.iterdir() if f.is_file() and bool(re.search(r'POSCAR-\d{4}$',
f.name))]
for poscar_disp in poscars_disp:
poscar = Poscar.from_file(poscar_disp)
ionpos, lattice = poscar.convert('jdftx')
subfolder_to_disp = folder_to_disps / poscar_disp.name[-4:]
if not os.path.isdir(subfolder_to_disp):
os.mkdir(subfolder_to_disp)
ionpos.to_file(subfolder_to_disp / 'jdft.ionpos')
lattice.to_file(subfolder_to_disp / 'jdft.lattice')
shutil.copyfile(folder_to_disps / poscar_disp, subfolder_to_disp / 'POSCAR')
if folder_files_to_copy is not None:
files_to_copy = [f for f in folder_files_to_copy.iterdir() if f.is_file()]
for file in files_to_copy:
shutil.copyfile(file, subfolder_to_disp / file.name)
with open(folder_to_disps / 'user_args_InterPhon', 'w') as file:
for key, value in user_args.items():
file.write(f'{key}: {value}\n')
def _make_postprocess(self,
folder_to_disps: Path,
filepath_unitcell: Path,
filepath_supercell: Path,
filepath_kpoints: Path,
user_args: dict = None,
sym_flag: bool = True) -> None:
"""
Processes the output files after all calculations with displaced atoms have finished.
Args:
folder_to_disps (str): path to the folder contains all folders with performed calculations with
atom displacements
filepath_unitcell (str): path to the POSCAR file that contains the unitcell for InterPhon
with defined sd dynamics
filepath_supercell (str): path to the POSCAR file produced by InterPhon with proper enlargement
filepath_kpoints (str): path to the KPOINTS file. The phonons will be assessed in the given k-points
user_args (dict, optional): dict with all necessary information for the InterPhon PreProcess class.
Example and default value: user_args = {'dft_code': 'vasp', 'displacement': 0.05,
'enlargement': "1 1 1", 'periodicity': "1 1 0"}
sym_flag (bool, optional): if True the symmetry will be applied. Only 2D symmetries are supported
"""
if user_args is None:
user_args = {'dft_code': 'vasp',
'displacement': 0.05,
'enlargement': '1 1 1',
'periodicity': '1 1 0'}
output_paths = [f / 'output.out' for f in folder_to_disps.iterdir()
if f.is_dir() and bool(re.search(r'\d{4}$', f.name))]
post_process = PostProcess(in_file_unit_cell=str(filepath_unitcell),
in_file_super_cell=str(filepath_supercell),
code_name='vasp')
post_process.set_user_arg(user_args)
post_process.set_reciprocal_lattice()
post_process.set_force_constant(force_files=[str(f) for f in output_paths],
code_name='jdftx',
sym_flag=sym_flag)
post_process.set_k_points(k_file=str(filepath_kpoints))
post_process.eval_phonon()
self.post_process = post_process
ThermalProperties.__init__(self, self.post_process.w_q * THz2eV)
def create_displacements_jdftx(self):
self._create_poscar_for_interphon(folder_to_jdftx_files=self.folder_to_jdftx_files,
select_fun=self.select_fun)
self._make_preprocess(poscar_unitcell=self.folder_to_jdftx_files / 'POSCAR_unitcell_InterPhon',
folder_to_disps=self.folder_to_jdftx_files,
folder_files_to_copy=self.folder_files_to_copy,
user_args=self.user_args,
sym_flag=self.sym_flag)
def get_phonons(self) -> NDArray[Shape['Nkpts, Nfreq'], Number]:
if self.post_process is None:
self._make_postprocess(folder_to_disps=self.folder_to_jdftx_files,
filepath_unitcell=self.folder_to_jdftx_files / 'POSCAR_unitcell_InterPhon',
filepath_supercell=self.folder_to_jdftx_files / 'POSCAR_supercell_InterPhon',
filepath_kpoints=self.folder_to_jdftx_files / 'KPOINTS',
user_args=self.user_args,
sym_flag=self.sym_flag)
return self.eigen_freq
def get_Gibbs_ZPE(self) -> float:
if self.eigen_freq is None:
self._make_postprocess(folder_to_disps=self.folder_to_jdftx_files,
filepath_unitcell=self.folder_to_jdftx_files / 'POSCAR_unitcell_InterPhon',
filepath_supercell=self.folder_to_jdftx_files / 'POSCAR_supercell_InterPhon',
filepath_kpoints=self.folder_to_jdftx_files / 'KPOINTS',
user_args=self.user_args,
sym_flag=self.sym_flag)
return ThermalProperties.get_Gibbs_ZPE(self)
def get_enthalpy_vib(self,
T: float) -> float:
if self.eigen_freq is None:
self._make_postprocess(folder_to_disps=self.folder_to_jdftx_files,
filepath_unitcell=self.folder_to_jdftx_files / 'POSCAR_unitcell_InterPhon',
filepath_supercell=self.folder_to_jdftx_files / 'POSCAR_supercell_InterPhon',
filepath_kpoints=self.folder_to_jdftx_files / 'KPOINTS',
user_args=self.user_args,
sym_flag=self.sym_flag)
return ThermalProperties.get_enthalpy_vib(self, T)
def get_TS_vib(self,
T: float) -> float:
if self.eigen_freq is None:
self._make_postprocess(folder_to_disps=self.folder_to_jdftx_files,
filepath_unitcell=self.folder_to_jdftx_files / 'POSCAR_unitcell_InterPhon',
filepath_supercell=self.folder_to_jdftx_files / 'POSCAR_supercell_InterPhon',
filepath_kpoints=self.folder_to_jdftx_files / 'KPOINTS',
user_args=self.user_args,
sym_flag=self.sym_flag)
return ThermalProperties.get_TS_vib(self, T)
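# --- Editorial usage sketch (not part of the original module) ---
# Minimal illustration of the intended two-stage workflow: first create the displaced
# geometries, run JDFTx in every generated subfolder, then evaluate the thermal
# corrections. The folder paths, the threshold in select_fun and T = 298.15 K are
# assumptions; the commented Stage-2 calls also expect a KPOINTS file in the phonon folder.
def _example_interphon_workflow() -> None:
    def select_fun(st):
        # freeze atoms whose third coordinate is below an assumed threshold
        # (adjust the value and the fractional/Cartesian convention to your slab)
        return [['F', 'F', 'F'] if c[2] < 5.0 else ['T', 'T', 'T'] for c in st.coords]
    interface = InterPhonInterface(folder_to_jdftx_files='calc/phonons',
                                   folder_files_to_copy='calc/common_inputs',
                                   select_fun=select_fun)
    # Stage 1: write POSCAR-00xx displacements and per-displacement jdft inputs
    interface.create_displacements_jdftx()
    # Stage 2 (after every JDFTx run has produced output.out in its subfolder):
    # zpe = interface.get_Gibbs_ZPE()
    # h_vib = interface.get_enthalpy_vib(298.15)
    # ts_vib = interface.get_TS_vib(298.15)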

View File

@@ -0,0 +1,914 @@
from __future__ import annotations
import numpy as np
import re
from monty.re import regrep
from echem.core.structure import Structure
from echem.core.constants import Bohr2Angstrom, Angstrom2Bohr, Hartree2eV, eV2Hartree
from echem.core.ionic_dynamics import IonicDynamics
from echem.core.electronic_structure import EBS
from echem.core.thermal_properties import ThermalProperties
from echem.io_data import vasp
from echem.io_data.universal import Cube
from typing import Union, Literal, TypedDict
from typing_extensions import NotRequired
from pathlib import Path
import warnings
import copy
from nptyping import NDArray, Shape, Number
from termcolor import colored
class Lattice:
def __init__(self,
lattice: NDArray[Shape['3, 3'], Number]):
self.lattice = lattice
@staticmethod
def from_file(filepath: str | Path):
if isinstance(filepath, str):
filepath = Path(filepath)
file = open(filepath, 'r')
data = file.readlines()
file.close()
patterns = {'lattice': r'^\s*lattice\s+'}
matches = regrep(str(filepath), patterns)
lattice = []
i = 0
while len(lattice) < 9:
line = data[matches['lattice'][0][1] + i].split()
for word in line:
try:
word = float(word)
lattice.append(word)
except ValueError:
pass
i += 1
lattice = np.array(lattice).reshape((3, 3))
return Lattice(lattice)
def to_file(self, filepath: str):
file = open(filepath, 'w')
file.write('lattice \\\n')
width_coords_float = max(len(str(int(np.max(self.lattice)))), len(str(int(np.min(self.lattice))))) + 16
for i, vector in enumerate(self.lattice):
file.write('\t')
for vector_i in vector:
file.write(f'{vector_i:{width_coords_float}.15f} ')
if i < 2:
file.write('\\')
file.write('\n')
file.close()
class Ionpos:
def __init__(self,
species: list[str],
coords: NDArray[Shape['Natoms, 3'], Number],
move_scale: list[int] | NDArray[Shape['Natoms'], Number] = None,
constraint_type: list[Literal['HyperPlane', 'Linear', 'None', 'Planar'] | None] = None,
constraint_params: list[list[float] | None] = None):
self.species = species
self.coords = coords
if move_scale is None:
move_scale = np.ones(len(coords), dtype=int)
elif isinstance(move_scale, list):
move_scale = np.array(move_scale, dtype=int)
self.move_scale = move_scale
self.constraint_type = constraint_type
self.constraint_params = constraint_params
@staticmethod
def from_file(filepath: str | Path):
if isinstance(filepath, str):
filepath = Path(filepath)
file = open(filepath, 'r')
data = file.readlines()
file.close()
patterns = {'coords': r'^\s*ion\s+'}
matches = regrep(str(filepath), patterns)
natoms = len(matches['coords'])
species = []
coords = np.zeros((natoms, 3))
move_scale = np.zeros(natoms, dtype=int)
constraint_type = []
constraint_params = []
for i, ion in enumerate(matches['coords']):
line = data[ion[1]].split()
species.append(line[1])
coords[i] = [line[2], line[3], line[4]]
move_scale[i] = line[5]
if len(line) > 6:
constraint_type.append(line[6])
constraint_params.append([float(line[7]), float(line[8]), float(line[9])])
else:
constraint_type.append(None)
constraint_params.append(None)
return Ionpos(species, coords, move_scale, constraint_type, constraint_params)
def to_file(self,
filepath: str | Path) -> None:
if isinstance(filepath, str):
filepath = Path(filepath)
file = open(filepath, 'w')
width_species = max([len(sp) for sp in self.species])
width_coords_float = max(len(str(int(np.max(self.coords)))), len(str(int(np.min(self.coords))))) + 16
if self.constraint_params is None and self.constraint_type is None:
for sp, coord, ms in zip(self.species, self.coords, self.move_scale):
file.write(f'ion {sp:{width_species}} ')
for coord_i in coord:
file.write(f'{coord_i:{width_coords_float}.15f} ')
file.write(f'{ms}\n')
elif self.constraint_params is not None and self.constraint_type is not None:
for sp, coord, ms, ctype, cparams in zip(self.species, self.coords, self.move_scale,
self.constraint_type, self.constraint_params):
file.write(f'ion {sp:{width_species}} ')
for coord_i in coord:
file.write(f'{coord_i:{width_coords_float}.15f} ')
if ctype is None:
file.write(f'{ms}\n')
else:
file.write(f'{ms} ')
file.write(f'{ctype} ')
file.write(f'{cparams[0]} {cparams[1]} {cparams[2]}\n')
else:
raise ValueError('constraint_type and constraint_params must be both specified or both be None')
file.close()
def convert(self,
format: Literal['vasp'], *args):
if format == 'vasp':
lattice = np.transpose(args[0].lattice) * Bohr2Angstrom
return vasp.Poscar(Structure(lattice, self.species, self.coords * Bohr2Angstrom))
else:
raise NotImplementedError('Currently only format=vasp is supported')
def get_structure(self,
lattice: Lattice) -> Structure:
return Structure(lattice.lattice * Bohr2Angstrom, self.species, self.coords * Bohr2Angstrom)
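# --- Editorial usage sketch (not part of the original module) ---
# Minimal illustration of converting a JDFTx geometry (jdft.ionpos + jdft.lattice,
# in Bohr) into a VASP POSCAR in Angstrom; the file names are assumptions.
def _example_ionpos_to_poscar() -> None:
    ionpos = Ionpos.from_file('jdft.ionpos')
    lattice = Lattice.from_file('jdft.lattice')
    poscar = ionpos.convert('vasp', lattice)   # vasp.Poscar with lattice transposed and scaled to Angstrom
    poscar.to_file('POSCAR')
    structure = ionpos.get_structure(lattice)  # the same geometry as an echem Structure in Angstrom
    print(f'{structure.natoms} atoms converted')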
class Input:
def __init__(self, commands: list[tuple[str, str]]):
self.commands = commands
@staticmethod
def from_file(filepath: str | Path):
file = open(filepath, 'r')
data = file.readlines()
file.close()
commands = []
to_append = ''
for line in data:
line = line.strip().strip('\n')
if line.endswith('\\'):
to_append += re.sub(r'\s+', ' ', line.strip('\\'))
else:
if len(line) == 0:
continue
to_append += line
line = to_append.split()
if line[0] == 'dump':
for i in line[2:]:
commands.append(('dump', f'{line[1]} {i}'))
else:
commands.append((line[0], ' '.join(line[1:])))
to_append = ''
return Input(commands)
class EnergyIonicHist(TypedDict):
F: NDArray[Shape['Nsteps'], Number]
G: NotRequired[NDArray[Shape['Nsteps'], Number]]
muN: NotRequired[NDArray[Shape['Nsteps'], Number]]
class Output(IonicDynamics):
def __init__(self,
fft_box_size: NDArray[Shape['3'], Number],
energy_ionic_hist: EnergyIonicHist,
coords_hist: NDArray[Shape['Nsteps, Natoms, 3'], Number],
forces_hist: NDArray[Shape['Nsteps, Natoms, 3'], Number] | None,
nelec_hist: NDArray[Shape['Nsteps'], Number],
magnetization_hist: NDArray[Shape['Nsteps, 2'], Number] | None,
structure: Structure,
nbands: int,
nkpts: int,
mu: float | None,
HOMO: float | None,
LUMO: float | None,
phonons: dict[Literal['real', 'imag', 'zero', 'nStates'], np.ndarray | None],
pseudopots: dict,
lowdin: dict[str, float] | None):
super(Output, self).__init__(forces_hist, coords_hist, structure.lattice, True)
self.fft_box_size = fft_box_size
self.energy_ionic_hist = energy_ionic_hist
self.coords_hist = coords_hist
self.nelec_hist = nelec_hist
self.magnetization_hist = magnetization_hist
self.structure = structure
self.nbands = nbands
self.nkpts = nkpts
self.mu = mu
self.HOMO = HOMO
self.LUMO = LUMO
self.phonons = phonons
self.pseudopots = pseudopots
self.lowdin = lowdin
if phonons['real'] is not None and len(phonons['real']) > 0:
self.thermal_props = ThermalProperties(np.array([phonons['real']]) * Hartree2eV)
@property
def energy(self) -> float:
if 'G' in self.energy_ionic_hist.keys():
return self.energy_ionic_hist['G'][-1]
else:
return self.energy_ionic_hist['F'][-1]
@property
def nisteps(self) -> int:
return len(self.energy_ionic_hist['F'])
@property
def nelec(self) -> float:
return self.nelec_hist[-1]
@property
def nelec_pzc(self) -> int:
return np.sum([self.structure.natoms_by_type[key] * self.pseudopots[key] for key in self.pseudopots.keys()])
@property
def magnetization_abs(self) -> float:
if self.magnetization_hist is None:
raise ValueError('It is non-spin-polarized calculation')
else:
return self.magnetization_hist[-1, 0]
@property
def magnetization_tot(self) -> float:
if self.magnetization_hist is None:
raise ValueError('It is non-spin-polarized calculation')
else:
return self.magnetization_hist[-1, 1]
@property
def nspin(self) -> int:
if self.magnetization_hist is None:
return 1
else:
return 2
@staticmethod
def from_file(filepath: str | Path):
if isinstance(filepath, str):
filepath = Path(filepath)
# TODO: the non-Cartesian coords case is not implemented
file = open(filepath, 'r')
data = file.readlines()
file.close()
patterns = {'natoms': r'Initialized \d+ species with (\d+) total atoms.',
'coords': r'# Ionic positions in cartesian coordinates:',
'forces': r'# Forces in Cartesian coordinates:',
'fft_box_size': r'Chosen fftbox size, S = \[(\s+\d+\s+\d+\s+\d+\s+)\]',
'lattice': r'---------- Initializing the Grid ----------',
'nbands': r'nBands:\s+(\d+)',
'nkpts': r'Reduced to (\d+) k-points under symmetry',
'nkpts_folded': r'Folded \d+ k-points by \d+x\d+x\d+ to (\d+) k-points.',
'is_kpts_irreducable': r'No reducable k-points',
'nelec': r'nElectrons:\s+(\d+.\d+)',
'magnetization': r'magneticMoment:\s+\[\s+Abs:\s+(\d+.\d+)\s+Tot:\s+([-+]?\d*\.\d*)',
'mu': r'\s+mu\s+:\s+([-+]?\d*\.\d*)',
'mu_hist': r'mu:\s+([-+]?\d*\.\d*)',
'HOMO': r'\s+HOMO\s*:\s+([-+]?\d*\.\d*)',
'LUMO': r'\s+LUMO\s*:\s+([-+]?\d*\.\d*)',
'F': r'^\s*F\s+=\s+([-+]?\d*\.\d*)',
'muN': r'\s+muN\s+=\s+([-+]?\d*\.\d*)',
'G': r'\s+G\s+=\s+([-+]?\d*\.\d*)',
'phonon report': r'(\d+) imaginary modes, (\d+) modes within cutoff, (\d+) real modes',
'zero mode': r'Zero mode \d+:',
'imaginary mode': r'Imaginary mode \d+:',
'real mode': r'Real mode \d+:',
'ionic convergence': r'IonicMinimize: Converged',
'pseudopots': r'\s*Title:\s+([a-zA-Z0-9]*).',
'valence_elecs': r'(\d+) valence electrons in orbitals',
'phonon_perturbations': r'\s+Perturbation:\s+\d+\s+nStates:\s+(\d+)',
'lowdin': r'#--- Lowdin population analysis ---'}
matches = regrep(str(filepath), patterns)
F = np.array([float(i[0][0]) for i in matches['F']])
energy_ionic_hist: EnergyIonicHist = {'F': F}
if 'muN' in matches.keys():
energy_ionic_hist['muN'] = np.array([float(i[0][0]) for i in matches['muN']])
if 'G' in matches.keys():
energy_ionic_hist['G'] = np.array([float(i[0][0]) for i in matches['G']])
nelec_hist = np.array([float(i[0][0]) for i in matches['nelec']])
natoms = int(matches['natoms'][0][0][0])
nbands = int(matches['nbands'][0][0][0])
phonons = {}
if matches['phonon_perturbations']:
nstates = [int(i[0][0]) for i in matches['phonon_perturbations']]
phonons['nStates'] = np.array(nstates)
else:
phonons['nStates'] = None
if bool(matches['is_kpts_irreducable']):
nkpts = int(matches['nkpts_folded'][0][0][0])
else:
nkpts = int(matches['nkpts'][0][0][0])
if bool(matches['mu']):
mu = float(matches['mu'][0][0][0])
elif matches['mu_hist']:
mu = float(matches['mu_hist'][-1][0][0])
else:
mu = None
if bool(matches['HOMO']):
HOMO = float(matches['HOMO'][0][0][0])
else:
HOMO = None
if bool(matches['LUMO']):
LUMO = float(matches['LUMO'][0][0][0])
else:
LUMO = None
if bool(matches['magnetization']):
magnetization_hist = np.zeros((len(matches['magnetization']), 2))
for i, mag in enumerate(matches['magnetization']):
magnetization_hist[i] = [float(mag[0][0]), float(mag[0][1])]
else:
magnetization_hist = None
fft_box_size = np.array([int(i) for i in matches['fft_box_size'][0][0][0].split()])
lattice = np.zeros((3, 3))
lattice[0] = [float(i) for i in data[matches['lattice'][0][1] + 2].split()[1:4]]
lattice[1] = [float(i) for i in data[matches['lattice'][0][1] + 3].split()[1:4]]
lattice[2] = [float(i) for i in data[matches['lattice'][0][1] + 4].split()[1:4]]
lattice = lattice.T * Bohr2Angstrom
if matches['forces']:
line_numbers = [int(i[1]) + 1 for i in matches['forces']]
forces_hist = np.zeros((len(line_numbers), natoms, 3))
for i, line_number in enumerate(line_numbers):
atom_number = 0
while len(line := data[line_number + atom_number].split()) > 0:
forces_hist[i, atom_number] = [float(line[2]), float(line[3]), float(line[4])]
atom_number += 1
else:
forces_hist = None
if matches['phonon report']:
freq_report = {key: int(i) for key, i in zip(['imaginary modes', 'modes within cutoff', 'real modes'],
matches['phonon report'][0][0])}
if freq_report['modes within cutoff']:
line_numbers = [int(i[1]) + 1 for i in matches['zero mode']]
zero_mode_freq = np.zeros(freq_report['modes within cutoff'], dtype=complex)
for i, line_number in enumerate(line_numbers):
zero_mode_freq[i] = complex(data[line_number].split()[1].replace('i', 'j'))
else:
zero_mode_freq = None
if freq_report['imaginary modes']:
line_numbers = [int(i[1]) + 1 for i in matches['imaginary mode']]
imag_mode_freq = np.zeros(freq_report['imaginary modes'], dtype=complex)
for i, line_number in enumerate(line_numbers):
imag_mode_freq[i] = complex(data[line_number].split()[1].replace('i', 'j'))
else:
imag_mode_freq = None
if freq_report['real modes']:
line_numbers = [int(i[1]) + 1 for i in matches['real mode']]
real_mode_freq = np.zeros(freq_report['real modes'])
for i, line_number in enumerate(line_numbers):
real_mode_freq[i] = float(data[line_number].split()[1])
else:
real_mode_freq = None
phonons['zero'] = zero_mode_freq
phonons['imag'] = imag_mode_freq
phonons['real'] = real_mode_freq
else:
phonons['zero'] = None
phonons['imag'] = None
phonons['real'] = None
if matches['coords']:
line_numbers = [int(i[1]) + 1 for i in matches['coords']]
coords_hist = np.zeros((len(line_numbers), natoms, 3))
species = []
atom_number = 0
while len(line := data[line_numbers[0] + atom_number].split()) > 0:
species += [line[1]]
atom_number += 1
for i, line_number in enumerate(line_numbers):
atom_number = 0
while len(line := data[line_number + atom_number].split()) > 0:
coords_hist[i, atom_number] = [float(line[2]), float(line[3]), float(line[4])]
atom_number += 1
else:
matches = regrep(str(filepath), {'ions': r'ion\s+([a-zA-Z]+)\s+[-+]?\d*\.\d*',
'coords': r'ion\s+[a-zA-Z]+\s+([-+]?\d*\.\d*)\s+([-+]?\d*\.\d*)\s+([-+]?\d*\.\d*)'})
species = [i[0][0] for i in matches['ions']]
coords_hist = [[[float(i) for i in coord[0]] for coord in matches['coords']]]
coords_hist = np.array(coords_hist)
if bool(matches['lowdin']):
lowdin = {}
i = matches['lowdin'][-1][1] + 1
while (line := data[i]) != '\n':
line = line.split()
lowdin[line[2]] = [float(i) for i in line[3:]]
if bool(matches['magnetization']):
i += 2
else:
i += 1
else:
lowdin = None
structure = Structure(lattice, species, coords_hist[-1] * Bohr2Angstrom, coords_are_cartesian=True)
pseudopots = {i[0][0]: int(j[0][0]) for i, j in zip(matches['pseudopots'], matches['valence_elecs'])}
return Output(fft_box_size, energy_ionic_hist, coords_hist, forces_hist, nelec_hist, magnetization_hist,
structure, nbands, nkpts, mu, HOMO, LUMO, phonons, pseudopots, lowdin)
def get_xdatcar(self) -> vasp.Xdatcar:
transform = np.linalg.inv(self.structure.lattice)
return vasp.Xdatcar(structure=self.structure,
trajectory=np.matmul(self.coords_hist * Bohr2Angstrom, transform))
def get_poscar(self) -> vasp.Poscar:
structure = copy.copy(self.structure)
structure.coords = self.coords_hist[0] * Bohr2Angstrom
return vasp.Poscar(structure=structure)
def get_contcar(self) -> vasp.Poscar:
return vasp.Poscar(structure=self.structure)
def get_ionpos(self, nstep=-1) -> Ionpos:
return Ionpos(self.structure.species, self.coords_hist[nstep])
def get_lattice(self) -> Lattice:
return Lattice(self.structure.lattice * Angstrom2Bohr)
def mod_phonon_zero2real(self, n_leave: int = 0) -> None:
if self.phonons['zero'] is not None:
mask_real = self.phonons['zero'].imag == 0
mask_complex = np.invert(mask_real)
n_real = np.sum(mask_real)
n_imag = np.sum(mask_complex)
n_zero = len(self.phonons['zero'])
if n_zero < n_leave:
print(colored(f'There are only {n_zero} zero modes, however you set {n_leave=}',
color='red', attrs=['bold']))
elif n_zero > n_leave:
if n_leave > n_imag:
n_transfer = n_real - (n_leave - n_imag)
else:
n_transfer = np.sum(mask_real)
mods_for_transfer = None
if n_zero - n_transfer > n_leave:
print(colored(f'Can not leave', color='red', attrs=['bold']),
n_leave,
colored('modes, because there are', color='red', attrs=['bold']),
n_imag,
colored('imaginary modes', color='red', attrs=['bold']))
print(colored('The following values can not be converted to real:', color='red', attrs=['bold']),
self.phonons['zero'][mask_complex])
if np.any(mask_real):
mods_for_transfer = np.sort(self.phonons['zero'][mask_real].real)
print(colored('The following values will be converted to real:', color='red', attrs=['bold']),
self.phonons['zero'][mask_real])
else:
mods_for_transfer = np.sort(self.phonons['zero'][mask_real].real)[-n_transfer:]
if mods_for_transfer is not None:
self.phonons['real'] = np.hstack((mods_for_transfer, self.phonons['real']))
del_indices = []
for mode in mods_for_transfer:
del_indices.append(np.where(self.phonons['zero'] == mode)[0][0])
self.phonons['zero'] = np.delete(self.phonons['zero'], del_indices)
else:
print(colored('There are no zero phonons', color='green', attrs=['bold']))
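# --- Editorial usage sketch (not part of the original module) ---
# Minimal illustration of parsing a JDFTx log with Output.from_file and pulling out
# the quantities exposed by this class; the 'output.out' path is an assumption.
def _example_output_usage(filepath: str = 'output.out') -> None:
    out = Output.from_file(filepath)
    print(f'Ionic steps: {out.nisteps}, final energy: {out.energy:.6f} Ha')
    print(f'N electrons: {out.nelec:.2f} (PZC reference: {out.nelec_pzc})')
    if out.mu is not None:
        print(f'mu = {out.mu:.4f} Ha')
    out.get_contcar().to_file('CONTCAR')       # final geometry written as a VASP file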
class EBS_data:
@staticmethod
def from_file(filepath: str | Path,
output: Output) -> NDArray[Shape['Nspin, Nkpts, Nbands'], Number]:
if isinstance(filepath, str):
filepath = Path(filepath)
data = np.fromfile(filepath, dtype=np.float64)
if len(data) % (output.nkpts * output.nbands) != 0:
raise ValueError(
f'Number of eigenvalues should be equal to nspin * nkpts * nbands, but now {output.nkpts=},'
f'{output.nbands=}, and data has {len(data)} values')
nspin = len(data) // (output.nkpts * output.nbands)
data = data.reshape(nspin, output.nkpts, output.nbands)
return data
class Eigenvals(EBS_data):
def __init__(self,
eigenvalues: NDArray[Shape['Nspin, Nkpts, Nbands'], Number],
units: Literal['eV', 'Hartree']):
self.eigenvalues = eigenvalues
self.units = units
@staticmethod
def from_file(filepath: str | Path,
output: Output) -> 'Eigenvals':
if isinstance(filepath, str):
filepath = Path(filepath)
eigenvalues = super(Eigenvals, Eigenvals).from_file(filepath, output)
return Eigenvals(eigenvalues, 'Hartree')
def mod_to_eV(self):
if self.units == 'eV':
print('Units are already eV')
else:
self.eigenvalues *= Hartree2eV
self.units = 'eV'
def mod_to_Ha(self):
if self.units == 'Hartree':
print('Units are already Hartree')
else:
self.eigenvalues *= eV2Hartree
self.units = 'Hartree'
class Fillings(EBS_data):
def __init__(self,
occupations: np.ndarray):
self.occupations = occupations
@staticmethod
def from_file(filepath: str | Path,
output: Output) -> 'Fillings':
if isinstance(filepath, str):
filepath = Path(filepath)
occupations = super(Fillings, Fillings).from_file(filepath, output)
return Fillings(occupations)
class VolumetricData:
def __init__(self,
data: np.ndarray,
structure: Structure):
self.data = data
self.structure = structure
def __add__(self, other):
assert isinstance(other, VolumetricData), 'Other object must belong to VolumetricData class'
assert self.data.shape == other.data.shape, f'Shapes of two data arrays must be the same but they are ' \
f'{self.data.shape} and {other.data.shape}'
if self.structure != other.structure:
warnings.warn('Two VolumetricData instances contain different Structures. '
'The Structure will be taken from the 2nd (other) instance. '
                          'Hope you know what you are doing')
return VolumetricData(self.data + other.data, other.structure)
def __sub__(self, other):
assert isinstance(other, VolumetricData), 'Other object must belong to VolumetricData class'
assert self.data.shape == other.data.shape, f'Shapes of two data arrays must be the same but they are ' \
f'{self.data.shape} and {other.data.shape}'
if self.structure != other.structure:
warnings.warn('Two VolumetricData instances contain different Structures. '
'The Structure will be taken from the 2nd (other) instance. '
                          'Hope you know what you are doing')
return VolumetricData(self.data - other.data, other.structure)
@staticmethod
def from_file(filepath: str | Path,
fft_box_size: NDArray[Shape['3'], Number],
structure: Structure):
if isinstance(filepath, str):
filepath = Path(filepath)
data = np.fromfile(filepath, dtype=np.float64)
data = data.reshape(fft_box_size)
return VolumetricData(data, structure)
def convert_to_cube(self) -> Cube:
return Cube(self.data, self.structure, np.zeros(3))
class kPts:
def __init__(self,
weights: NDArray[Shape['Nkpts'], Number]):
self.weights = weights
@staticmethod
def from_file(filepath: str | Path):
if isinstance(filepath, str):
filepath = Path(filepath)
file = open(filepath, 'r')
data = file.readlines()
file.close()
weights = []
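        # Assumed kPts convention: for spin-polarized runs each k-point is listed once per spin channel,
        # so only the first half of the weights is kept; otherwise the listed weights are halved.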
if 'spin' in data[0].split():
for line in data[:int(len(data) / 2)]:
weights.append(float(line.split()[6]))
weights = np.array(weights)
return kPts(weights)
else:
for line in data:
weights.append(float(line.split()[6]))
weights = np.array(weights) / 2
return kPts(weights)
class DOS(EBS):
@staticmethod
def from_folder(folderpath: str | Path,
output_name: str = 'output.out',
jdft_prefix='jdft',
units: Literal['eV', 'Ha'] = 'eV'):
if isinstance(folderpath, str):
folderpath = Path(folderpath)
out = Output.from_file(folderpath / output_name)
eigs = Eigenvals.from_file(folderpath / f'{jdft_prefix}.eigenvals', output=out)
fills = Fillings.from_file(folderpath / f'{jdft_prefix}.fillings', output=out)
kpts = kPts.from_file(folderpath / f'{jdft_prefix}.kPts')
if units == 'eV':
return DOS(eigenvalues=eigs.eigenvalues * Hartree2eV,
weights=kpts.weights,
efermi=out.mu * Hartree2eV,
occupations=fills.occupations)
elif units == 'Ha':
return DOS(eigenvalues=eigs.eigenvalues,
weights=kpts.weights,
efermi=out.mu,
occupations=fills.occupations)
else:
raise ValueError(f'units can be "eV" or "Ha", however you entered "{units}"')
class BandProjections:
def __init__(self,
proj_coeffs: NDArray[Shape['Nspin, Nkpts, Nbands, Norbs'], Number],
weights: NDArray[Shape['Nkpts'], Number],
species: list[str],
norbs_per_atomtype: dict,
orbs_names: list[str],
orbs_data: list[dict]):
self.proj_coeffs = proj_coeffs
self.weights = weights
self.species = species
self.norbs_per_atomtype = norbs_per_atomtype
self.orbs_names = orbs_names
self.orbs_data = orbs_data
self.eigenvalues = None
@property
def nspin(self):
return self.proj_coeffs.shape[0]
@property
def nkpts(self):
return self.proj_coeffs.shape[1]
@property
def nbands(self):
return self.proj_coeffs.shape[2]
@property
def norbs(self):
return self.proj_coeffs.shape[3]
@staticmethod
def from_file(filepath: str | Path):
if isinstance(filepath, str):
filepath = Path(filepath)
file = open(filepath, 'r')
data = file.readlines()
file.close()
patterns = {'x': r'#\s+\d+'}
matches = regrep(str(filepath), patterns)
nstates = int(data[0].split()[0])
nbands = int(data[0].split()[2])
norbs = int(data[0].split()[4])
if 'spin' in data[int(matches['x'][0][1])]:
nspin = 2
else:
nspin = 1
nkpts = int(nstates / nspin)
proj_coeffs = np.zeros((nspin, nkpts, nbands, norbs))
weights = np.zeros(nstates)
start_lines = []
for i, match in enumerate(matches['x']):
start_lines.append(int(match[1]))
weights[i] = float(re.sub(r'[^0-9.]', '', data[int(match[1])].split()[7]))
if nspin == 2 and not np.array_equal(weights[:len(weights) // 2], weights[len(weights) // 2:]):
raise ValueError(f'Kpts weights can not be correctly split {weights=}')
if nspin == 2:
weights = weights[:len(weights) // 2]
species = []
norbs_per_atomtype = {}
orbs_names = []
orbs_data = []
idx_atom = -1
for iline in range(2, start_lines[0]):
line = data[iline].split()
atomtype = line[0]
natoms_per_atomtype = int(line[1])
species += [atomtype] * natoms_per_atomtype
norbs_per_atomtype[line[0]] = int(line[2])
l_max = int(line[3])
            nshells_per_l = []
            for i in range(l_max + 1):
                nshells_per_l.append(int(line[4 + i]))
            for i in range(natoms_per_atomtype):
                idx_atom += 1
                for l, n_max in zip(range(l_max + 1), nshells_per_l):
for n in range(n_max):
if l == 0:
orbs_names.append(f'{idx_atom} {atomtype} s {n + 1}({n_max})')
orbs_data.append({'atom_type': atomtype,
'atom_index': idx_atom,
'l': l,
'm': 0,
'orb_name': 's'})
elif l == 1:
for m, m_name in zip([-1, 0, 1], ['p_x', 'p_y', 'p_z']):
orbs_names.append(f'{idx_atom} {atomtype} {m_name} {n + 1}({n_max})')
orbs_data.append({'atom_type': atomtype,
'atom_index': idx_atom,
'l': l,
'm': m,
'orb_name': m_name})
elif l == 2:
for m, m_name in zip([-2, -1, 0, 1, 2], ['d_xy', 'd_yz', 'd_z^2', 'd_xz', 'd_x^2-y^2']):
orbs_names.append(f'{idx_atom} {atomtype} {m_name} {n + 1}({n_max})')
orbs_data.append({'atom_type': atomtype,
'atom_index': idx_atom,
'l': l,
'm': m,
'orb_name': m_name})
elif l > 2:
                            raise NotImplementedError('Only s, p and d orbitals are currently supported')
ikpt_major = -1
ikpt_minor = -1
for istate, (start, stop) in enumerate(zip(start_lines[:-1], start_lines[1:])):
if nspin == 2:
if data[start].split()[9] == '+1;':
ispin = 0
ikpt_major += 1
ikpt = ikpt_major
elif data[start].split()[9] == '-1;':
ispin = 1
ikpt_minor += 1
ikpt = ikpt_minor
else:
raise ValueError(f'Can\'t determine spin in string {data[start].split()}')
else:
ispin = 0
ikpt = istate
for iband, line in enumerate(range(start + 1, stop)):
proj_coeffs[ispin, ikpt, iband] = [float(k) for k in data[line].split()]
return BandProjections(proj_coeffs, weights / 2, species, norbs_per_atomtype, orbs_names, orbs_data)
def get_PDOS(self,
atom_numbers: list[int] | int,
eigenvals: Eigenvals,
get_orbs_names: bool = False,
specific_l: int = None,
dE: float = 0.01,
emin: float = None,
emax: float = None,
zero_at_fermi: bool = False,
sigma: float = 0.02,
efermi: float = None) -> Union[tuple[NDArray[Shape['Ngrid'], Number],
NDArray[Shape['Nspin, Norbs_selected, Ngrid'], Number]],
tuple[NDArray[Shape['Ngrid'], Number],
NDArray[Shape['Nspin, Norbs_selected, Ngrid'], Number],
list[str]]]:
self.eigenvalues = eigenvals.eigenvalues
if isinstance(atom_numbers, int):
atom_numbers = [atom_numbers]
if zero_at_fermi is True and efermi is None:
            raise ValueError('You cannot set zero_at_fermi=True if you did not specify the efermi value')
if emin is None:
emin = np.min(self.eigenvalues) - 1
if emax is None:
emax = np.max(self.eigenvalues) + 1
E_arr = np.arange(emin, emax, dE)
ngrid = E_arr.shape[0]
idxs = []
for atom in atom_numbers:
start = sum([self.norbs_per_atomtype[i] for i in self.species[:atom]])
for i in range(self.norbs_per_atomtype[self.species[atom]]):
idxs.append(start + i)
if specific_l is not None:
idxs = [idx for idx in idxs if self.orbs_data[idx]['l'] == specific_l]
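        # The PDOS of each selected orbital is built by weighting its projection coefficients with the k-point
        # weights and summing Gaussian-smeared eigenvalues over k-points and bands on the energy grid.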
proj_coeffs_weighted = self.proj_coeffs[:, :, :, idxs]
for spin in range(self.nspin):
for i, weight_kpt in enumerate(self.weights):
proj_coeffs_weighted[spin, i] *= weight_kpt
W_arr = np.moveaxis(proj_coeffs_weighted, [1, 2, 3], [2, 3, 1])
G_arr = EBS.gaussian_smearing(E_arr, self.eigenvalues, sigma)
PDOS_arr = np.zeros((self.nspin, len(idxs), ngrid))
for spin in range(self.nspin):
for idx in range(len(idxs)):
PDOS_arr[spin, idx] = np.sum(G_arr[spin, :, :, :] * W_arr[spin, idx, :, :, None],
axis=(0, 1))
if self.nspin == 1:
PDOS_arr *= 2
if get_orbs_names:
if zero_at_fermi:
return E_arr - efermi, PDOS_arr, [self.orbs_names[i] for i in idxs]
else:
return E_arr, PDOS_arr, [self.orbs_names[i] for i in idxs]
else:
if zero_at_fermi:
return E_arr - efermi, PDOS_arr
else:
return E_arr, PDOS_arr
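# Conversion helpers between electrode potential vs. SHE (V) and electron chemical potential mu (Hartree),
# assuming an absolute SHE potential of 4.66 eV (V_ref below).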
def VVSHE_2_mu_Ha(V):
V_ref = 4.66
return - (V_ref + V) * eV2Hartree
def mu_Ha_2_VVSHE(mu):
V_ref = 4.66
return - (mu * Hartree2eV + V_ref)

View File

@@ -0,0 +1,117 @@
from monty.re import regrep
import matplotlib.pyplot as plt
from pathlib import Path
from echem.io_data.jdftx import Output
from echem.core.constants import Hartree2eV
import numpy as np
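# Helper routines for extracting per-image energies from AutoNEB runs: they parse 'logfile_NEB.log' and
# 'py.log', the per-image JDFTx 'output.out' files under 'iterations/iter_*', and can optionally plot the
# resulting barriers with matplotlib.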
def get_energies_from_logs(folderpath, plot=False, dpi=200):
patterns = {'en': r'(\d+).(\d+)\s+Current Energy:\s+(.\d+\.\d+)', 'iter': r'Now starting iteration (\d+) on\s+\[(.+)\]'}
NEBlogpath = Path(folderpath) / 'logfile_NEB.log'
pylogpath = Path(folderpath) / 'py.log'
matches_neb = regrep(str(NEBlogpath), patterns)
matches_py = regrep(str(pylogpath), patterns)
iterations_number = len(matches_py['iter'])
energies = []
n_images_list = []
for i in range(iterations_number):
energies.append([])
images_list = matches_py['iter'][i][0][1].split(', ')
energies[i] = {key: [] for key in images_list}
n_images = len(images_list)
n_images_list.append(n_images)
for i in range(len(matches_neb['en'])):
iteration = int(matches_neb['en'][i][0][0])
image = matches_neb['en'][i][0][1]
energies[iteration - 1][image].append(float(matches_neb['en'][i][0][2]))
if plot:
max_i = 0
for i in range(len(energies)):
plt.figure(dpi=dpi)
barrier = []
all_images = []
for image in energies[i].keys():
if int(image) > max_i:
max_i = int(image)
plt.scatter([int(image) for _ in range(len(energies[i][image]))], energies[i][image], c=f'C{int(image)}')
if len(energies[i][image]) != 0:
plt.scatter(int(image), energies[i][image][-1], c=f'C{int(image)}')
barrier.append(energies[i][image][-1])
all_images.append(int(image))
plt.plot(all_images, barrier, c='black')
return plt, energies
else:
return energies
def get_energies_from_outs(folderpath, opt_history=False, plot=False, dpi=200):
    folderpath = Path(folderpath) / 'iterations'
neb_barriers_hist = []
neb_barriers = []
for iter, iter_path in enumerate(folderpath.glob('iter_*')):
neb_barriers.append([])
neb_barriers_hist.append([])
for f_path in iter_path.glob('[0-9]'):
out = Output.from_file(f_path / 'output.out')
if opt_history:
neb_barriers_hist[iter].append(out.energy_ionic_hist['G'] * Hartree2eV)
neb_barriers[iter].append(out.energy_ionic_hist['G'][-1] * Hartree2eV)
if plot:
if opt_history:
for i, barrier in enumerate(neb_barriers_hist):
plt.figure(dpi=dpi)
plt.title(f'Iteration {i}')
for i, traj in enumerate(barrier):
plt.plot(traj, label=i)
plt.legend(frameon=False)
plt.figure(dpi=dpi)
for i, barrier in enumerate(neb_barriers):
plt.plot(barrier, label=i)
plt.legend(frameon=False)
return plt, neb_barriers, neb_barriers_hist
else:
plt.figure(dpi=dpi)
for i, barrier in enumerate(neb_barriers):
plt.plot(barrier, label=i)
plt.legend(frameon=False)
return plt, neb_barriers
else:
return neb_barriers
def get_energies_from_pylog(filepath, plot=False, dpi=200):
energies = []
with open(filepath) as f:
data = f.readlines()
for line in data:
if 'Energies after iteration' in line:
energies.append(list(map(float, line.strip().split('[')[1][:-1].split(', '))))
if plot:
plt.figure(dpi=dpi)
for i, e in enumerate(energies):
plt.plot(e, label=i)
plt.legend(frameon=False)
return plt, energies
else:
return energies
def get_energies_from_NEBlog(folderpath, plot=False, dpi=200):
patterns = {'en': r'(\d+)\s+Current Energy:\s+(.\d+\.\d+)', \
'images': r'Successfully initialized JDFTx calculator(.+)/(\d+)'}
NEBlogpath = Path(folderpath) / 'logfile_NEB.log'
matches_neb = regrep(str(NEBlogpath), patterns)
nimages = len(matches_neb['images'])
energies = [[] for i in range(nimages)]
for i in range(len(matches_neb['en'])):
image = int(matches_neb['en'][i][0][0])
energies[image-1].append(float(matches_neb['en'][i][0][1]))
if plot:
plt.figure(dpi=dpi)
barrier = []
all_images = []
for image in range(len(energies)):
plt.scatter([image for _ in range(len(energies[image]))], energies[image], c=f'C{image}')
barrier.append(energies[image][-1])
all_images.append(int(image))
plt.plot(all_images, barrier, c='black')
return plt, energies
else:
return energies

View File

@@ -0,0 +1,171 @@
import numpy as np
import pandas as pd
import re
from monty.re import regrep
from tqdm import tqdm
from .universal import Xyz
from nptyping import NDArray, Shape, Number
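# Parsers for SCF log files that print 'ORBITAL ENERGIES' and 'MOLECULAR ORBITALS' blocks, and for xyz
# trajectories that carry a per-frame potential energy in the comment line.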
class SCFLog:
""""""
def __init__(self, eigenvalues=None, occupation=None, mol_orbs=None):
""""""
self.eigenvalues = eigenvalues
self.occupations = occupation
self.mol_orbs = mol_orbs
@property
def natoms(self):
if self.mol_orbs is not None:
return np.max(self.mol_orbs[0]['atom_ids']) + 1
else:
            raise ValueError('natoms can be determined only if mol_orbs has been read')
@property
def nbands(self):
if self.eigenvalues is not None:
return len(self.eigenvalues[0])
elif self.mol_orbs is not None:
return len(self.mol_orbs[0].columns) - 3
else:
            raise ValueError('nbands can be determined only if eigenvalues or mol_orbs has been read')
@property
def nsteps(self):
if self.eigenvalues is not None:
return len(self.eigenvalues)
elif self.mol_orbs is not None:
return len(self.mol_orbs)
else:
            raise ValueError('nsteps can be determined only if eigenvalues or mol_orbs has been read')
@staticmethod
def from_file(filepath):
file = open(filepath, 'r')
data = file.readlines()
file.close()
patterns = {'eigs': 'ORBITAL ENERGIES',
'mos': 'MOLECULAR ORBITALS'}
matches = regrep(filepath, patterns)
occs = []
eigs = []
for match in tqdm(matches['eigs'], desc='Eigenvalues', total=len(matches['eigs'])):
eigs_tmp = []
occs_tmp = []
i = match[1] + 4
while data[i] != '\n' and data[i] != '------------------\n':
line = data[i].split()
occs_tmp.append(float(line[1]))
eigs_tmp.append(float(line[3]))
i += 1
occs.append(occs_tmp)
eigs.append(eigs_tmp)
mos_arr = []
for match in tqdm(matches['mos'], desc='Molecular Orbitals', total=len(matches['mos'])):
df = pd.DataFrame()
first_columns_appended = None
last_batch_added = False
i = match[1] + 2
while data[i] != '\n' and data[i] != '------------------\n':
if re.match(r'\s*\w+\s+\w+\s+([-+]?\d*\.\d*\s+)+', data[i]) is not None:
last_batch_added = False
line = data[i].split()
if first_columns_appended is False:
atom_number = re.match(r'\d+', line[0])
mos_tmp[0].append(int(atom_number[0]))
atom_symbol = line[0][len(atom_number[0]):]
mos_tmp[1].append(atom_symbol)
orbital = line[1]
mos_tmp[2].append(orbital)
for j, value in enumerate(line[2:]):
mos_tmp[3 + j].append(float(value))
i += 1
elif first_columns_appended is True:
for j, value in enumerate(line[2:]):
mos_tmp[j].append(float(value))
i += 1
else:
pass
elif re.match(r'\s*(\d+\s+)+', data[i]) is not None:
line = data[i].split()
if first_columns_appended is False:
first_columns_appended = True
last_batch_added = True
df['atom_ids'] = mos_tmp[0][1:]
df['species'] = mos_tmp[1][1:]
df['orbital'] = mos_tmp[2][1:]
for j in range(3, len(mos_tmp)):
df[mos_tmp[j][0]] = mos_tmp[j][1:]
mos_tmp = [[] for _ in range(len(line))]
for j, n_mo in enumerate(line):
mos_tmp[j].append(int(n_mo))
i += 1
elif first_columns_appended is None:
last_batch_added = True
mos_tmp = [[] for j in range(len(line) + 3)]
mos_tmp[0].append('')
mos_tmp[1].append('')
mos_tmp[2].append('')
for j, n_mo in enumerate(line):
mos_tmp[3 + j].append(int(n_mo))
first_columns_appended = False
i += 1
elif first_columns_appended is True:
last_batch_added = True
for j in range(len(mos_tmp)):
df[mos_tmp[j][0]] = mos_tmp[j][1:]
mos_tmp = [[] for _ in range(len(line))]
for j, n_mo in enumerate(line):
mos_tmp[j].append(int(n_mo))
i += 1
else:
i += 1
if not last_batch_added:
# df = pd.concat([df, pd.DataFrame(mos_tmp)], axis=1)
for j in range(len(mos_tmp)):
df[mos_tmp[j][0]] = mos_tmp[j][1:]
mos_arr.append(df)
return SCFLog(np.array(eigs), np.array(occs), mos_arr)
class XyzTrajectory:
def __init__(self,
first_xyz: Xyz,
trajectory: NDArray[Shape['Nsteps, Natoms, 3'], Number],
energies_pot: NDArray[Shape['Nsteps'], Number]):
self.first_xyz = first_xyz
self.trajectory = trajectory
self.energies_pot = energies_pot
@staticmethod
def from_file(filepath):
first_xyz = Xyz.from_file(filepath)
trajectory = []
energies_pot = []
with open(filepath, 'rt') as file:
while True:
try:
natoms = int(file.readline().strip())
                except ValueError:
                    # readline() returns '' at the end of file, so int() raises and frame reading stops
                    break
line = file.readline()
energies_pot.append(float(line.split()[5]))
#energies_pot.append(float(line.split()[8].split('=')[1]))
coords = np.zeros((natoms, 3))
for i in range(natoms):
line = file.readline().split()
coords[i] = [float(j) for j in line[1:]]
trajectory.append(coords)
return XyzTrajectory(first_xyz, np.array(trajectory), np.array(energies_pot))

View File

@@ -0,0 +1,129 @@
import numpy as np
from monty.re import regrep
import itertools
import typing
class QEOutput:
def __init__(self):
self.patterns = {'nkpts': r'number of k points=\s+([\d]+)',
'kpts_coord': r'k\s*=\s*(-?\d.[\d]+)\s*(-?\d.[\d]+)\s*(-?\d.[\d]+)\s*\([\d]+ PWs\)',
'occupations': 'occupation numbers',
'efermi': r'the Fermi energy is\s*(-?[\d]+.[\d]+) ev'}
self.eigenvalues = None
self.weights = None
self.occupations = None
self.efermi = None
self.nkpt = None
@staticmethod
def _GaussianSmearing(x, x0, sigma):
"""Simulate the Delta function by a Gaussian shape function"""
return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2))
def from_file(self, filepath):
matches = regrep(filepath, self.patterns)
if len(matches['kpts_coord']) != 0:
with open(filepath, 'rt') as file:
file_data = file.readlines()
eigenvalues = []
for start, end in zip(matches['kpts_coord'], matches['occupations']):
data = file_data[start[1] + 2:end[1] - 1]
data = [float(i) for i in itertools.chain.from_iterable([line.split() for line in data])]
eigenvalues.append(data)
self.eigenvalues = np.array(eigenvalues)
occupations = []
n_strings_occups = matches['occupations'][0][1] - matches['kpts_coord'][0][1] - 1
for start in matches['occupations']:
data = file_data[start[1] + 1: start[1] + n_strings_occups]
data = [float(i) for i in itertools.chain.from_iterable([line.split() for line in data])]
occupations.append(data)
self.occupations = np.array(occupations)
self.efermi = float(matches['efermi'][0][0][0])
self.nkpt = int(matches['nkpts'][0][0][0])
weights = np.zeros(self.nkpt)
for i in range(self.nkpt):
weights[i] = file_data[matches['nkpts'][0][1]+2+i].split()[-1]
self.weights = weights
def get_band_eigs(self, bands):
if type(bands) is int:
return np.array([eig for eig in self.eigenvalues[:, bands]])
elif isinstance(bands, typing.Iterable):
return np.array([[eig for eig in self.eigenvalues[:, band]] for band in bands])
else:
raise ValueError('Variable bands should be int or iterable')
def get_band_occ(self, bands):
if type(bands) is int:
return [occ for occ in self.occupations[:, bands]]
elif isinstance(bands, typing.Iterable):
return np.array([[occ for occ in self.occupations[:, band]] for band in bands])
else:
raise ValueError('Variable bands should be int or iterable')
def get_DOS(self, **kwargs):
"""Calculate Density of States based on eigenvalues and its weights
Args:
dE (float): step of energy array in function output
zero_at_fermi (bool, optional): if True Fermi energy will be equal to zero
sm_param (dict, optional): parameters for smooth DOS.
E_min (float, str): minimum value in DOS calculation. If E_min == 'min' left border of energy
is equal to the minimum eigenvalue
E_max (float, str): maximum value in DOS calculation. If E_max == 'max' right border of energy
is equal to the maximum eigenvalue
bw_method (float): The method used to calculate the estimator bandwidth. This can be 'scott',
'silverman', a scalar constant or a callable. If a scalar, this will be used directly as `kde.factor`.
If a callable, it should take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used.
nelec (int): Number of electrons in the system. DOS integral to efermi should be equal to the nelec
Returns:
E, DOS - Two 1D np.arrays that contain energy and according DOS values
"""
if 'zero_at_fermi' in kwargs:
zero_at_fermi = kwargs['zero_at_fermi']
else:
zero_at_fermi = False
if 'dE' in kwargs:
dE = kwargs['dE']
else:
dE = 0.01
if 'smearing' in kwargs:
smearing = kwargs['smearing']
else:
smearing = 'Gaussian'
if smearing == 'Gaussian':
if 'sigma' in kwargs:
sigma = kwargs['sigma']
else:
sigma = 0.02
if 'emin' in kwargs:
E_min = kwargs['emin']
else:
E_min = np.min(self.eigenvalues)
            if 'emax' in kwargs:
                E_max = kwargs['emax']
            else:
                E_max = np.max(self.eigenvalues)
        else:
            raise ValueError(f'Only Gaussian smearing is supported but you used {smearing} instead')
E_arr = np.arange(E_min, E_max, dE)
DOS_arr = np.zeros_like(E_arr)
for energy_kpt, weight in zip(self.eigenvalues, self.weights):
for energy in energy_kpt:
DOS_arr += weight * self._GaussianSmearing(E_arr, energy, sigma)
# 2 is not used because sum(weights) = 2
if zero_at_fermi:
return E_arr - self.efermi, DOS_arr
else:
return E_arr, DOS_arr
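# Minimal usage sketch (hypothetical file name):
#   out = QEOutput()
#   out.from_file('pw.out')
#   E, dos = out.get_DOS(dE=0.01, sigma=0.05, zero_at_fermi=True)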

View File

@@ -0,0 +1,394 @@
import numpy as np
from echem.core.constants import ElemNum2Name, ElemName2Num, Bohr2Angstrom, Angstrom2Bohr
from echem.core.structure import Structure
import warnings
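# Program-independent file formats: Gaussian cube volumetric data plus simple xyz and xyz-trajectory readers.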
class Cube:
def __init__(self,
data: np.ndarray,
structure: Structure,
origin: np.ndarray,
units_data: str = 'Bohr',
comment: str = None,
charges=None,
dset_ids=None):
self.volumetric_data = data
self.structure = structure
self.origin = origin
self.units_data = units_data
if comment is None:
self.comment = 'Comment is not defined\nGood luck!\n'
else:
self.comment = comment
if charges is None:
self.charges = np.zeros(structure.natoms)
else:
self.charges = charges
self.dset_ids = dset_ids
def __repr__(self):
shape = self.volumetric_data.shape
return f'{self.comment}\n' + f'NX: {shape[0]}\nNY: {shape[1]}\nNZ: {shape[2]}\n' + \
f'Origin:\n{self.origin[0]:.5f} {self.origin[1]:.5f} {self.origin[2]:.5f}\n' + \
repr(self.structure)
def __add__(self, other):
assert isinstance(other, Cube), 'Other object must belong to Cube class'
assert np.array_equal(self.origin, other.origin), 'Two Cube instances must have the same origin'
assert self.volumetric_data.shape == other.volumetric_data.shape, 'Two Cube instances must have ' \
'the same shape of volumetric_data'
if self.structure != other.structure:
warnings.warn('Two Cube instances have different structures. '
'The structure will be taken from the 1st (self) instance. '
                          'Hope you know what you are doing')
return Cube(self.volumetric_data + other.volumetric_data, self.structure, self.origin)
def __sub__(self, other):
assert isinstance(other, Cube), 'Other object must belong to Cube class'
assert np.array_equal(self.origin, other.origin), 'Two Cube instances must have the same origin'
assert self.volumetric_data.shape == other.volumetric_data.shape, 'Two Cube instances must have ' \
'the same shape of volumetric_data'
if self.structure != other.structure:
warnings.warn('\nTwo Cube instances have different structures. '
'The structure will be taken from the 1st (self) instance. '
                          'Hope you know what you are doing')
return Cube(self.volumetric_data - other.volumetric_data, self.structure, self.origin)
def __neg__(self):
return Cube(-self.volumetric_data, self.structure, self.origin)
def __mul__(self, other):
assert isinstance(other, Cube), 'Other object must belong to Cube class'
assert np.array_equal(self.origin, other.origin), 'Two Cube instances must have the same origin'
assert self.volumetric_data.shape == other.volumetric_data.shape, 'Two Cube instances must have ' \
'the same shape of volumetric_data'
if self.structure != other.structure:
warnings.warn('\nTwo Cube instances have different structures. '
'The structure will be taken from the 1st (self) instance. '
                          'Hope you know what you are doing')
return Cube(self.volumetric_data * other.volumetric_data, self.structure, self.origin)
@staticmethod
def from_file(filepath):
with open(filepath, 'rt') as file:
comment_1 = file.readline()
comment_2 = file.readline()
comment = comment_1 + comment_2
line = file.readline().split()
natoms = int(line[0])
if natoms < 0:
dset_ids_flag = True
natoms = abs(natoms)
else:
dset_ids_flag = False
origin = np.array([float(line[1]), float(line[2]), float(line[3])])
if len(line) == 4:
n_data = 1
elif len(line) == 5:
n_data = int(line[4])
line = file.readline().split()
NX = int(line[0])
xaxis = np.array([float(line[1]), float(line[2]), float(line[3])])
line = file.readline().split()
NY = int(line[0])
yaxis = np.array([float(line[1]), float(line[2]), float(line[3])])
line = file.readline().split()
NZ = int(line[0])
zaxis = np.array([float(line[1]), float(line[2]), float(line[3])])
if NX > 0 and NY > 0 and NZ > 0:
units = 'Bohr'
elif NX < 0 and NY < 0 and NZ < 0:
units = 'Angstrom'
else:
raise ValueError('The sign of the number of all voxels should be > 0 or < 0')
if units == 'Angstrom':
NX, NY, NZ = -NX, -NY, -NZ
lattice = np.array([xaxis * NX, yaxis * NY, zaxis * NZ])
species = []
charges = np.zeros(natoms)
coords = np.zeros((natoms, 3))
for atom in range(natoms):
line = file.readline().split()
species += [ElemNum2Name[int(line[0])]]
charges[atom] = float(line[1])
coords[atom, :] = line[2:]
if units == 'Bohr':
lattice = Bohr2Angstrom * lattice
coords = Bohr2Angstrom * coords
origin = Bohr2Angstrom * origin
structure = Structure(lattice, species, coords, coords_are_cartesian=True)
dset_ids = None
dset_ids_processed = -1
if dset_ids_flag is True:
dset_ids = []
line = file.readline().split()
n_data = int(line[0])
if n_data < 1:
raise ValueError(f'Bad value of n_data: {n_data}')
dset_ids_processed += len(line)
dset_ids += [int(i) for i in line[1:]]
while dset_ids_processed < n_data:
line = file.readline().split()
dset_ids_processed += len(line)
dset_ids += [int(i) for i in line]
dset_ids = np.array(dset_ids)
if n_data != 1:
                raise NotImplementedError(f'The processing of cube files with more than 1 data value is not implemented.'
                                          f' n_data = {n_data}')
data = np.zeros((NX, NY, NZ))
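            # Cube files store the volumetric data with the z index varying fastest, then y, then x;
            # the flat value index is decomposed accordingly below.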
indexes = np.arange(0, NX * NY * NZ)
indexes_1 = indexes // (NY * NZ)
indexes_2 = (indexes // NZ) % NY
indexes_3 = indexes % NZ
i = 0
for line in file:
for value in line.split():
data[indexes_1[i], indexes_2[i], indexes_3[i]] = float(value)
i += 1
return Cube(data, structure, origin, units, comment, charges, dset_ids)
#def reduce(self, factor):
# from skimage.measure import block_reduce
# try:
# volumetric_data_reduced = block_reduce(self.volumetric_data, block_size=(factor, factor, factor), func=np.mean)
# Ns_reduced = np.shape(volumetric_data_reduced)
# except:
# raise ValueError('Try another factor value')
# return Cube(volumetric_data_reduced, self.structure, self.comment, Ns_reduced, self.charges)
def to_file(self, filepath, units='Bohr'):
if not self.structure.coords_are_cartesian:
self.structure.mod_coords_to_cartesian()
Ns = np.array(self.volumetric_data.shape)
width_Ni = len(str(np.max(Ns)))
if units == 'Angstrom':
Ns = - Ns
width_Ni += 1
width_lattice = len(str(int(np.max(self.structure.lattice)))) + 7
width_coord = len(str(int(np.max(self.structure.coords)))) + 7
elif units == 'Bohr':
lattice = self.get_voxel() * Angstrom2Bohr
coords = self.structure.coords * Angstrom2Bohr
origin = self.origin * Angstrom2Bohr
width_lattice = len(str(int(np.max(lattice)))) + 7
width_coord = len(str(int(np.max(coords)))) + 7
else:
raise ValueError(f'Irregular units flag: {units}. Units must be \'Bohr\' or \'Angstrom\'')
if np.sum(self.structure.lattice < 0):
width_lattice += 1
if np.sum(self.structure.coords < 0):
width_coord += 1
width = np.max([width_lattice, width_coord])
if self.dset_ids is not None:
natoms = - self.structure.natoms
else:
natoms = self.structure.natoms
width_natoms = len(str(natoms))
width_1_column = max(width_Ni, width_natoms)
with open(filepath, 'w') as file:
file.write(self.comment)
if units == 'Angstrom':
file.write(f' {natoms:{width_1_column}} {self.origin[0]:{width}.6f} '
f' {self.origin[1]:{width}.6f} {self.origin[2]:{width}.6f}\n')
for N_i, lattice_vector in zip(Ns, self.get_voxel()):
file.write(f' {N_i:{width_1_column}} {lattice_vector[0]:{width}.6f} '
f' {lattice_vector[1]:{width}.6f} {lattice_vector[2]:{width}.6f}\n')
for atom_name, charge, coord in zip(self.structure.species, self.charges, self.structure.coords):
file.write(
f' {ElemName2Num[atom_name]:{width_1_column}} {charge:{width}.6f} '
f' {coord[0]:{width}.6f} {coord[1]:{width}.6f} {coord[2]:{width}.6f}\n')
elif units == 'Bohr':
file.write(f' {natoms:{width_1_column}} {origin[0]:{width}.6f} '
f' {origin[1]:{width}.6f} {origin[2]:{width}.6f}\n')
for N_i, lattice_vector in zip(Ns, lattice):
file.write(f' {N_i:{width_1_column}} {lattice_vector[0]:{width}.6f} '
f' {lattice_vector[1]:{width}.6f} {lattice_vector[2]:{width}.6f}\n')
for atom_name, charge, coord in zip(self.structure.species, self.charges, coords):
file.write(
f' {ElemName2Num[atom_name]:{width_1_column}} {charge:{width}.6f} '
f' {coord[0]:{width}.6f} {coord[1]:{width}.6f} {coord[2]:{width}.6f}\n')
else:
raise ValueError(f'Irregular units flag: {units}. Units must be \'Bohr\' or \'Angstrom\'')
if self.dset_ids is not None:
m = len(self.dset_ids)
file.write(f' {m:{width_1_column}}' + ' ')
for dset_id in self.dset_ids:
file.write(str(dset_id) + ' ')
file.write('\n')
for i in range(abs(Ns[0])):
for j in range(abs(Ns[1])):
for k in range(abs(Ns[2])):
file.write(str(' %.5E' % self.volumetric_data[i][j][k]))
if k % 6 == 5:
file.write('\n')
file.write('\n')
def mod_to_zero_origin(self):
self.structure.coords -= self.origin
self.origin = np.zeros(3)
def get_average_along_axis(self, axis):
"""
Gets average value along axis
Args:
axis (int):
if 0 than average along x wil be calculated
if 1 along y
if 2 along z
Returns:
np.array of average value along selected axis
"""
if axis == 2:
return np.mean(self.volumetric_data, (0, 1))
elif axis == 1:
return np.mean(self.volumetric_data, (0, 2))
elif axis == 0:
return np.mean(self.volumetric_data, (1, 2))
else:
raise ValueError('axis can be only 0, 1 or 2')
def get_average_along_axis_max(self, axis: int, scale=None):
"""Calculate the vacuum level (the maximum planar average value along selected axis)
Args:
axis (int): The axis number along which the planar average is calculated. The first axis is 0
scale (float): The value that is multiplying by the result. It's used for converting between
different units
Returns:
(float): The vacuum level multiplied by scale factor
"""
avr = self.get_average_along_axis(axis)
if scale is None:
return np.max(avr)
else:
return scale * np.max(avr)
def get_voxel(self, units='Angstrom'):
NX, NY, NZ = self.volumetric_data.shape
voxel = self.structure.lattice.copy()
voxel[0] /= NX
voxel[1] /= NY
voxel[2] /= NZ
if units == 'Angstrom':
return voxel
elif units == 'Bohr':
return voxel * Angstrom2Bohr
else:
raise ValueError('units can be \'Angstrom\' or \'Bohr\'')
def get_integrated_number(self):
if self.units_data == 'Bohr':
voxel_volume = np.linalg.det(self.get_voxel(units='Bohr'))
return voxel_volume * np.sum(self.volumetric_data)
else:
            raise NotImplementedError('Integration is only implemented for volumetric data in Bohr units')
def assign_top_n_data_to_atoms(self, n_top, r):
"""Assign top n abs of volumetric data to atoms. Might be used to assign electron density to atoms.
Args:
n_top (int): Number of voxels that will be analysed
            r (float): Radius. A voxel is considered to belong to an atom if the distance between the voxel
                center and the atom is less than r.
Returns:
            (np.ndarray): Array of boolean values. The i-th row represents the i-th atom, the j-th column the j-th voxel
"""
sorted_indices = np.array(np.unravel_index(np.argsort(-np.abs(self.volumetric_data), axis=None),
self.volumetric_data.shape)).T
translation_vector = np.sum(self.structure.lattice, axis=0)
voxels_centres = sorted_indices[:n_top, :] * translation_vector + translation_vector / 2 + self.origin
atom_indices = list(range(self.structure.natoms))
if self.structure.natoms == 1:
return np.linalg.norm(voxels_centres - self.structure.coords[0], axis=-1) < r
else:
return np.linalg.norm(np.broadcast_to(voxels_centres, (self.structure.natoms,) + voxels_centres.shape) -
np.expand_dims(self.structure.coords[atom_indices], axis=1), axis=-1) < r
class Xyz:
def __init__(self, structure, comment):
self.structure = structure
self.comment = comment
@staticmethod
def from_file(filepath):
with open(filepath, 'rt') as file:
natoms = int(file.readline().strip())
comment = file.readline()
coords = np.zeros((natoms, 3))
species = []
for i in range(natoms):
line = file.readline().split()
species.append(line[0])
coords[i] = [float(j) for j in line[1:]]
struct = Structure(np.zeros((3, 3)), species, coords, coords_are_cartesian=True)
return Xyz(struct, comment)
class XyzTrajectory:
def __init__(self, first_xyz, trajectory):
self.first_xyz = first_xyz
self.trajectory = trajectory
@staticmethod
def from_file(filepath):
first_xyz = Xyz.from_file(filepath)
trajectory = []
with open(filepath, 'rt') as file:
while True:
try:
natoms = int(file.readline().strip())
                except ValueError:
                    # readline() returns '' at the end of file, so int() raises and frame reading stops
                    break
file.readline()
coords = np.zeros((natoms, 3))
for i in range(natoms):
line = file.readline().split()
coords[i] = [float(j) for j in line[1:]]
trajectory.append(coords)
return XyzTrajectory(first_xyz, np.array(trajectory))

View File

@@ -0,0 +1,675 @@
from __future__ import annotations
import numpy as np
from typing import Union, List, Iterable
from monty.re import regrep
from echem.core.structure import Structure
from ..io_data.universal import Cube
from echem.core.electronic_structure import EBS
from echem.core.ionic_dynamics import IonicDynamics
from echem.core.constants import Angstrom2Bohr
from . import jdftx
from pymatgen.io.vasp import Procar as Procar_pmg
from nptyping import NDArray, Shape, Number
from pathlib import Path
import warnings
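# Readers and writers for VASP files (POSCAR, OUTCAR, WAVECAR, PROCAR, CHG/CHGCAR, XDATCAR) built on the
# package's shared Structure and EBS helpers.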
class Poscar:
"""Class that reads VASP POSCAR files"""
def __init__(self,
structure: Structure,
comment: str = None,
sdynamics_data: list = None):
"""
Create a Poscar instance
Args:
structure (Structure class): a base class that contains lattice, coords and species information
comment (str): a VASP comment
sdynamics_data (list, 2D np.array): data about selective dynamics for each atom. [['T', 'T', 'F'],
['F', 'F', 'F'],...]
"""
self.structure = structure
self.comment = comment
self.sdynamics_data = sdynamics_data
def __repr__(self):
return f'{self.comment}\n' + repr(self.structure)
@staticmethod
def from_file(filepath: str | Path):
"""
Static method to read a POSCAR file
Args:
filepath: path to the POSCAR file
Returns:
Poscar class object
"""
if isinstance(filepath, str):
filepath = Path(filepath)
file = open(filepath, 'r')
data = file.readlines()
file.close()
comment = data[0].strip()
scale = float(data[1])
lattice = np.array([[float(i) for i in line.split()] for line in data[2:5]])
if scale < 0:
# In VASP, a negative scale factor is treated as a volume.
# We need to translate this to a proper lattice vector scaling.
vol = abs(np.linalg.det(lattice))
lattice *= (-scale / vol) ** (1 / 3)
else:
lattice *= scale
name_species = data[5].split()
num_species = [int(i) for i in data[6].split()]
species = []
for name, num in zip(name_species, num_species):
species += [name]*num
sdynamics_is_used = False
start_atoms = 8
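        # POSCAR layout: line 0 comment, line 1 scale, lines 2-4 lattice vectors, line 5 element names,
        # line 6 element counts, an optional 'Selective dynamics' line, then the coordinate-mode line
        # followed by the atomic coordinates.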
if data[7][0] in 'sS':
sdynamics_is_used = True
start_atoms = 9
coords_are_cartesian = False
if sdynamics_is_used:
if data[8][0] in 'cCkK':
coords_are_cartesian = True
else:
if data[7][0] in 'cCkK':
coords_are_cartesian = True
coords = []
coords_scale = scale if coords_are_cartesian else 1
sdynamics_data = list() if sdynamics_is_used else None
for i in range(start_atoms, start_atoms + np.sum(num_species), 1):
line = data[i].split()
coords.append([float(j) * coords_scale for j in line[:3]])
if sdynamics_is_used:
for i in range(start_atoms, start_atoms + np.sum(num_species), 1):
line = data[i].split()
sdynamics_data.append([j for j in line[3:6]])
struct = Structure(lattice, species, coords, coords_are_cartesian)
if sdynamics_is_used:
return Poscar(struct, comment, sdynamics_data)
else:
return Poscar(struct, comment)
def to_file(self, filepath: str | Path):
if isinstance(filepath, str):
filepath = Path(filepath)
file = open(filepath, 'w')
file.write(f'{self.comment}\n')
file.write('1\n')
for vector in self.structure.lattice:
file.write(f' {vector[0]} {vector[1]} {vector[2]}\n')
species = np.array(self.structure.species)
sorted_order = np.argsort(species, kind='stable')
unique, counts = np.unique(species, return_counts=True)
line = ' '
for u in unique:
line += u + ' '
file.write(line + '\n')
line = ' '
for c in counts:
line += str(c) + ' '
file.write(line + '\n')
if self.sdynamics_data is not None:
file.write('Selective dynamics\n')
if self.structure.coords_are_cartesian:
file.write('Cartesian\n')
else:
file.write('Direct\n')
if self.sdynamics_data is None:
for i in sorted_order:
atom = self.structure.coords[i]
file.write(f' {atom[0]} {atom[1]} {atom[2]}\n')
else:
for i in sorted_order:
atom = self.structure.coords[i]
sd_atom = self.sdynamics_data[i]
file.write(f' {atom[0]} {atom[1]} {atom[2]} {sd_atom[0]} {sd_atom[1]} {sd_atom[2]}\n')
file.close()
def convert(self, format):
if format == 'jdftx':
self.mod_coords_to_cartesian()
return jdftx.Ionpos(self.structure.species, self.structure.coords * Angstrom2Bohr), \
jdftx.Lattice(np.transpose(self.structure.lattice) * Angstrom2Bohr)
else:
raise ValueError('Only format = jdftx is supported')
def mod_add_atoms(self, coords, species, sdynamics_data=None):
self.structure.mod_add_atoms(coords, species)
if sdynamics_data is not None:
if any(isinstance(el, list) for el in sdynamics_data):
for sd_atom in sdynamics_data:
self.sdynamics_data.append(sd_atom)
else:
self.sdynamics_data.append(sdynamics_data)
def mod_change_atoms(self, ids: Union[int, Iterable],
new_coords: Union[Iterable[float], Iterable[Iterable[float]]] = None,
new_species: Union[str, List[str]] = None,
new_sdynamics_data: Union[Iterable[str], Iterable[Iterable[str]]] = None):
self.structure.mod_change_atoms(ids, new_coords, new_species)
if new_sdynamics_data is not None:
if self.sdynamics_data is None:
self.sdynamics_data = [['T', 'T', 'T'] for _ in range(self.structure.natoms)]
if isinstance(ids, Iterable):
for i, new_sdata in zip(ids, new_sdynamics_data):
self.sdynamics_data[i] = new_sdata
else:
self.sdynamics_data[ids] = new_sdynamics_data
def mod_coords_to_box(self):
assert self.structure.coords_are_cartesian is False, 'This operation allowed only for NON-cartesian coords'
self.structure.coords %= 1
def mod_coords_to_direct(self):
self.structure.mod_coords_to_direct()
def mod_coords_to_cartesian(self):
self.structure.mod_coords_to_cartesian()
class Outcar(EBS, IonicDynamics):
"""Class that reads VASP OUTCAR files"""
def __init__(self,
weights: NDArray[Shape['Nkpts'], Number],
efermi_hist: NDArray[Shape['Nisteps'], Number],
eigenvalues_hist: NDArray[Shape['Nisteps, Nspin, Nkpts, Nbands'], Number],
occupations_hist: NDArray[Shape['Nisteps, Nspin, Nkpts, Nbands'], Number],
energy_hist: NDArray[Shape['Nallsteps'], Number],
energy_ionic_hist: NDArray[Shape['Nisteps'], Number],
forces_hist: NDArray[Shape['Nispeps, Natoms, 3'], Number]):
EBS.__init__(self, eigenvalues_hist[-1], weights, efermi_hist[-1], occupations_hist[-1])
IonicDynamics.__init__(self, forces_hist, None, None, None)
self.efermi_hist = efermi_hist
self.energy_hist = energy_hist
self.energy_ionic_hist = energy_ionic_hist
self.eigenvalues_hist = eigenvalues_hist
self.occupations_hist = occupations_hist
def __add__(self, other):
"""
Concatenates Outcar files (all histories). It is useful for ionic optimization.
If k-point meshes from two Outcars are different, weights, eigenvalues and occupations will be taken
from the 2nd (other) Outcar instance
Args:
other (Outcar class): Outcar that should be added to the current Outcar
Returns (Outcar class):
New Outcar with concatenated histories
"""
assert isinstance(other, Outcar), 'Other object must belong to Outcar class'
assert self.natoms == other.natoms, 'Number of atoms of two files must be equal'
if not np.array_equal(self.weights, other.weights):
warnings.warn('Two Outcar instances have been calculated with different k-point folding. '
'Weights, eigenvalues and occupations will be taken from the 2nd (other) instance. '
                          'Hope you know what you are doing')
return Outcar(other.weights,
np.concatenate((self.efermi_hist, other.efermi_hist)),
other.eigenvalues_hist,
other.occupations_hist,
np.concatenate((self.energy_hist, other.energy_hist)),
np.concatenate((self.energy_ionic_hist, other.energy_ionic_hist)),
np.concatenate((self.forces_hist, other.forces_hist)))
return Outcar(other.weights,
np.concatenate((self.efermi_hist, other.efermi_hist)),
np.concatenate((self.eigenvalues_hist, other.eigenvalues_hist)),
np.concatenate((self.occupations_hist, other.occupations_hist)),
np.concatenate((self.energy_hist, other.energy_hist)),
np.concatenate((self.energy_ionic_hist, other.energy_ionic_hist)),
np.concatenate((self.forces_hist, other.forces_hist)))
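    # Usage sketch (hypothetical file names): concatenate the histories of two consecutive runs
    #   outcar = Outcar.from_file('OUTCAR_part1') + Outcar.from_file('OUTCAR_part2')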
@property
def natoms(self):
return self.forces.shape[0]
@property
def nisteps(self):
return self.energy_ionic_hist.shape[0]
@property
def forces(self):
return self.forces_hist[-1]
@property
def energy(self):
return self.energy_ionic_hist[-1]
@staticmethod
def from_file(filepath: str | Path):
if isinstance(filepath, str):
filepath = Path(filepath)
file = open(filepath, 'r')
data = file.readlines()
file.close()
patterns = {'nkpts': r'k-points\s+NKPTS\s+=\s+(\d+)',
'nbands': r'number of bands\s+NBANDS=\s+(\d+)',
'natoms': r'NIONS\s+=\s+(\d+)',
'weights': 'Following reciprocal coordinates:',
'efermi': r'E-fermi\s:\s+([-.\d]+)',
'energy': r'free energy\s+TOTEN\s+=\s+(.\d+\.\d+)\s+eV',
'energy_ionic': r'free energy\s+TOTEN\s+=\s+(.\d+\.\d+)\s+eV',
'kpoints': r'k-point\s+(\d+)\s:\s+[-.\d]+\s+[-.\d]+\s+[-.\d]+\n',
'forces': r'\s+POSITION\s+TOTAL-FORCE',
'spin': r'spin component \d+\n'}
matches = regrep(str(filepath), patterns)
nbands = int(matches['nbands'][0][0][0])
nkpts = int(matches['nkpts'][0][0][0])
natoms = int(matches['natoms'][0][0][0])
energy_hist = np.array([float(i[0][0]) for i in matches['energy']])
energy_ionic_hist = np.array([float(i[0][0]) for i in matches['energy_ionic']])
if matches['spin']:
nspin = 2
else:
nspin = 1
if nkpts == 1:
weights = np.array([float(data[matches['weights'][0][1] + 2].split()[3])])
else:
weights = np.zeros(nkpts)
for i in range(nkpts):
weights[i] = float(data[matches['weights'][0][1] + 2 + i].split()[3])
weights /= np.sum(weights)
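        # The k-point weights read from the 'Following reciprocal coordinates:' block are normalized to sum to 1.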
arr = matches['efermi']
efermi_hist = np.zeros(len(arr))
for i in range(len(arr)):
efermi_hist[i] = float(arr[i][0][0])
nisteps = len(energy_ionic_hist)
eigenvalues_hist = np.zeros((nisteps, nspin, nkpts, nbands))
occupations_hist = np.zeros((nisteps, nspin, nkpts, nbands))
each_kpoint_list = np.array([[int(j[0][0]), int(j[1])] for j in matches['kpoints']])
for step in range(nisteps):
for spin in range(nspin):
for kpoint in range(nkpts):
arr = data[each_kpoint_list[nkpts * nspin * step + nkpts * spin + kpoint, 1] + 2:
each_kpoint_list[nkpts * nspin * step + nkpts * spin + kpoint, 1] + 2 + nbands]
eigenvalues_hist[step, spin, kpoint] = [float(i.split()[1]) for i in arr]
occupations_hist[step, spin, kpoint] = [float(i.split()[2]) for i in arr]
arr = matches['forces']
forces_hist = np.zeros((nisteps, natoms, 3))
for step in range(nisteps):
for atom in range(natoms):
line = data[arr[step][1] + atom + 2:arr[step][1] + atom + 3]
line = line[0].split()
forces_hist[step, atom] = [float(line[3]), float(line[4]), float(line[5])]
return Outcar(weights, efermi_hist, eigenvalues_hist, occupations_hist,
energy_hist, energy_ionic_hist, forces_hist)
class Wavecar:
"""Class that reads VASP WAVECAR files"""
# TODO: add useful functions for Wavecar class: plot charge density, plot real and imag parts etc.
def __init__(self, kb_array, wavefunctions, ngrid_factor):
self.kb_array = kb_array
self.wavefunctions = wavefunctions
self.ngrid_factor = ngrid_factor
@staticmethod
def from_file(filepath, kb_array, ngrid_factor=1.5):
from echem.core.vaspwfc_p3 import vaspwfc
wfc = vaspwfc(filepath)
wavefunctions = []
for kb in kb_array:
kpoint = kb[0]
band = kb[1]
wf = wfc.wfc_r(ikpt=kpoint, iband=band, ngrid=wfc._ngrid * ngrid_factor)
wavefunctions.append(wf)
return Wavecar(kb_array, wavefunctions, ngrid_factor)
class Procar:
def __init__(self, proj_koeffs, orbital_names):
self.proj_koeffs = proj_koeffs
self.eigenvalues = None
self.weights = None
self.nspin = None
self.nkpts = None
self.nbands = None
self.efermi = None
self.natoms = None
self.norbs = proj_koeffs.shape[4]
self.orbital_names = orbital_names
@staticmethod
def from_file(filepath):
procar = Procar_pmg(filepath)
spin_keys = list(procar.data.keys())
proj_koeffs = np.zeros((len(spin_keys),) + procar.data[spin_keys[0]].shape)
for i, spin_key in enumerate(spin_keys):
proj_koeffs[i] = procar.data[spin_key]
return Procar(proj_koeffs, procar.orbitals)
def get_PDOS(self, outcar: Outcar, atom_numbers, **kwargs):
self.eigenvalues = outcar.eigenvalues
self.weights = outcar.weights
self.nspin = outcar.nspin
self.nkpts = outcar.nkpts
self.nbands = outcar.nbands
self.efermi = outcar.efermi
self.natoms = outcar.natoms
if 'zero_at_fermi' in kwargs:
zero_at_fermi = kwargs['zero_at_fermi']
else:
zero_at_fermi = False
if 'dE' in kwargs:
dE = kwargs['dE']
else:
dE = 0.01
if 'smearing' in kwargs:
smearing = kwargs['smearing']
else:
smearing = 'Gaussian'
if smearing == 'Gaussian':
if 'sigma' in kwargs:
sigma = kwargs['sigma']
else:
sigma = 0.02
if 'emin' in kwargs:
E_min = kwargs['emin']
else:
E_min = np.min(self.eigenvalues)
if 'emax' in kwargs:
E_max = kwargs['emax']
else:
E_max = np.max(self.eigenvalues)
else:
raise ValueError(f'Only Gaussian smearing is supported but you used {smearing} instead')
E_arr = np.arange(E_min, E_max, dE)
ngrid = E_arr.shape[0]
proj_coeffs_weighted = self.proj_koeffs[:, :, :, atom_numbers, :]
for spin in range(self.nspin):
for i, weight_kpt in enumerate(self.weights):
proj_coeffs_weighted[spin, i] *= weight_kpt
W_arr = np.moveaxis(proj_coeffs_weighted, [2, 3, 4], [4, 2, 3])
G_arr = EBS.gaussian_smearing(E_arr, self.eigenvalues, sigma)
PDOS_arr = np.zeros((self.nspin, len(atom_numbers), self.norbs, ngrid))
for spin in range(self.nspin):
for atom in range(len(atom_numbers)):
PDOS_arr[spin, atom] = np.sum(G_arr[spin, :, None, :, :] * W_arr[spin, :, atom, :, :, None],
axis=(0, 2))
if self.nspin == 1:
PDOS_arr *= 2
if zero_at_fermi:
return E_arr - self.efermi, PDOS_arr
else:
return E_arr, PDOS_arr
class Chgcar:
"""
Class for reading CHG and CHGCAR files from vasp
For now, we ignore augmentation occupancies data
"""
def __init__(self, structure, charge_density, spin_density=None):
self.structure = structure
self.charge_density = charge_density
self.spin_density = spin_density
@staticmethod
def from_file(filepath):
poscar = Poscar.from_file(filepath)
structure = poscar.structure
volumetric_data = []
read_data = False
with open(filepath, 'r') as file:
for i in range(8 + structure.natoms):
file.readline()
for line in file:
line_data = line.strip().split()
if read_data:
for value in line_data:
if i < length - 1:
data[indexes_1[i], indexes_2[i], indexes_3[i]] = float(value)
i += 1
else:
data[indexes_1[i], indexes_2[i], indexes_3[i]] = float(value)
read_data = False
volumetric_data.append(data)
else:
if len(line_data) == 3:
try:
shape = np.array(list(map(int, line_data)))
                        except ValueError:
                            pass
else:
read_data = True
nx, ny, nz = shape
data = np.zeros(shape)
length = np.prod(shape)
i = 0
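                            # CHG/CHGCAR values are written with x varying fastest, then y, then z,
                            # so the flat value index is decomposed in that (Fortran-like) order.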
indexes = np.arange(0, length)
indexes_1 = indexes % nx
indexes_2 = (indexes // nx) % ny
indexes_3 = indexes // (nx * ny)
if len(volumetric_data) == 1:
return Chgcar(structure, volumetric_data[0])
elif len(volumetric_data) == 2:
return Chgcar(structure, volumetric_data[0], volumetric_data[1])
else:
raise ValueError(f'The file contains more than 2 volumetric data, len = {len(volumetric_data)}')
def convert_to_cube(self, volumetric_data='charge_density'):
comment = ' Cube file was created using Electrochemistry package\n'
if volumetric_data == 'charge_density':
return Cube(data=self.charge_density,
structure=self.structure,
comment=comment+' Charge Density\n',
origin=np.zeros(3))
elif volumetric_data == 'spin_density':
return Cube(data=self.spin_density,
structure=self.structure,
comment=comment + ' Spin Density\n',
origin=np.zeros(3))
elif volumetric_data == 'spin_major':
return Cube(data=(self.charge_density + self.spin_density)/2,
structure=self.structure,
comment=comment+' Major Spin\n',
origin=np.zeros(3))
elif volumetric_data == 'spin_minor':
return Cube(data=(self.charge_density - self.spin_density)/2,
structure=self.structure,
comment=comment+' Minor Spin\n',
origin=np.zeros(3))
def to_file(self, filepath):
#TODO write to_file func
pass
class Xdatcar:
"""Class that reads VASP XDATCAR files"""
def __init__(self,
structure,
comment: str = None,
trajectory=None):
"""
Create an Xdatcar instance
Args:
structure (Structure class): a base class that contains lattice, coords and species information
comment (str): a VASP comment
trajectory (3D np.array): contains coordinates of all atoms along with trajectory. It has the shape
n_steps x n_atoms x 3
"""
self.structure = structure
self.comment = comment
self.trajectory = trajectory
def __add__(self, other):
"""
        Concatenates Xdatcar files (their trajectories)
Args:
other (Xdatcar class): Xdatcar that should be added to the current Xdatcar
Returns (Xdatcar class):
New Xdatcar with concatenated trajectory
"""
assert isinstance(other, Xdatcar), 'Other object must belong to Xdatcar class'
assert np.array_equal(self.structure.lattice, other.structure.lattice), 'Lattices of two files must be equal'
assert self.structure.species == other.structure.species, 'Species in two files must be identical'
assert self.structure.coords_are_cartesian == other.structure.coords_are_cartesian, \
'Coords must be in the same coordinate system'
trajectory = np.vstack((self.trajectory, other.trajectory))
return Xdatcar(self.structure, self.comment + ' + ' + other.comment, trajectory)
def add(self, other):
"""
        Concatenates Xdatcar files (their trajectories)
Args:
other (Xdatcar class): Xdatcar that should be added to the current Xdatcar
Returns (Xdatcar class):
New Xdatcar with concatenated trajectory
"""
return self.__add__(other)
def add_(self, other):
"""
        Concatenates Xdatcar files (their trajectories). This is an in-place operation; the current Xdatcar will be modified
Args:
other (Xdatcar class): Xdatcar that should be added to the current Xdatcar
"""
assert isinstance(other, Xdatcar), 'Other object must belong to Xdatcar class'
        assert np.array_equal(self.structure.lattice, other.structure.lattice), 'Lattices of two files must be equal'
assert self.structure.species == other.structure.species, 'Species in two files must be identical'
assert self.structure.coords_are_cartesian == other.structure.coords_are_cartesian, \
'Coords must be in the same coordinate system'
self.trajectory = np.vstack((self.trajectory, other.trajectory))
@property
def nsteps(self):
return len(self.trajectory)
@staticmethod
def from_file(filepath):
"""
Static method to read a XDATCAR file
Args:
filepath: path to the XDATCAR file
Returns:
Xdatcar class object
"""
file = open(filepath, 'r')
data = file.readlines()
file.close()
comment = data[0].strip()
scale = float(data[1])
lattice = np.array([[float(i) for i in line.split()] for line in data[2:5]])
if scale < 0:
# In VASP, a negative scale factor is treated as a volume.
# We need to translate this to a proper lattice vector scaling.
vol = abs(np.linalg.det(lattice))
lattice *= (-scale / vol) ** (1 / 3)
else:
lattice *= scale
name_species = data[5].split()
num_species = [int(i) for i in data[6].split()]
species = []
for name, num in zip(name_species, num_species):
species += [name] * num
n_atoms = np.sum(num_species)
n_steps = int((len(data) - 7) / (n_atoms + 1))
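        # Each configuration block is one 'Direct configuration=' header line plus n_atoms coordinate lines;
        # the 7 lines above the first block form the common POSCAR-like header.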
trajectory = np.zeros((n_steps, n_atoms, 3))
for i in range(n_steps):
atom_start = 8 + i * (n_atoms + 1)
atom_stop = 7 + (i + 1) * (n_atoms + 1)
data_step = [line.split() for line in data[atom_start:atom_stop]]
for j in range(n_atoms):
trajectory[i, j] = [float(k) for k in data_step[j]]
struct = Structure(lattice, species, trajectory[0], coords_are_cartesian=False)
return Xdatcar(struct, comment, trajectory)
def to_file(self, filepath):
file = open(filepath, 'w')
file.write(f'{self.comment}\n')
file.write('1\n')
for vector in self.structure.lattice:
file.write(f' {vector[0]} {vector[1]} {vector[2]}\n')
species = np.array(self.structure.species)
sorted_order = np.argsort(species, kind='stable')
sorted_trajectory = self.trajectory[:, sorted_order, :]
unique, counts = np.unique(species, return_counts=True)
line = ' '
for u in unique:
line += u + ' '
file.write(line + '\n')
line = ' '
for c in counts:
line += str(c) + ' '
file.write(line + '\n')
for i in range(self.nsteps):
file.write(f'Direct configuration= {i + 1}\n')
for j in range(self.structure.natoms):
file.write(f' {sorted_trajectory[i, j, 0]} '
f'{sorted_trajectory[i, j, 1]} '
f'{sorted_trajectory[i, j, 2]}\n')
file.close()
def mod_coords_to_cartesian(self):
if self.structure.coords_are_cartesian is True:
return 'Coords are already cartesian'
else:
self.trajectory = np.matmul(self.trajectory, self.structure.lattice)
self.structure.mod_coords_to_cartesian()
def mod_coords_to_box(self):
assert self.structure.coords_are_cartesian is False, 'This operation allowed only for NON-cartesian coords'
self.trajectory %= 1
self.structure.coords %= 1

View File

View File

@@ -0,0 +1,651 @@
"""
AutoNEB realization from ASE package
E. L. Kolsbjerg, M. N. Groves, and B. Hammer, J. Chem. Phys, 145, 094107, 2016. (doi: 10.1063/1.4961868)
modified by: Sergey Pavlov
"""
import numpy as np
import shutil
import os
import types
from math import log
from math import exp
from contextlib import ExitStack
from pathlib import Path
from warnings import warn
from ase.io import Trajectory
from ase.io import read
from ase.mep.neb import NEB
from ase.optimize import BFGS
from ase.optimize import FIRE
from ase.mep.neb import NEBOptimizer
from ase.calculators.singlepoint import SinglePointCalculator
import ase.parallel as mpi
import functools
print = functools.partial(print, flush=True)
class AutoNEB:
"""AutoNEB object.
The AutoNEB algorithm streamlines the execution of NEB and CI-NEB
calculations following the algorithm described in:
E. L. Kolsbjerg, M. N. Groves, and B. Hammer, J. Chem. Phys,
145, 094107, 2016. (doi: 10.1063/1.4961868)
The user supplies at minimum the two end-points and possibly also some
intermediate images.
The stages are:
1) Define a set of images and name them sequentially.
Must at least have a relaxed starting and ending image
User can supply intermediate guesses which do not need to
have previously determined energies (probably from another
NEB calculation with a lower level of theory)
2) AutoNEB will first evaluate the user provided intermediate images
3) AutoNEB will then add additional images dynamically until n_max
is reached
4) A climbing image will attempt to locate the saddle point
5) All the images between the highest point and the starting point
are further relaxed to smooth the path
6) All the images between the highest point and the ending point are
further relaxed to smooth the path
Step 4 and 5-6 are optional steps!
Parameters:
attach_calculators:
Function which adds valid calculators to the list of images supplied.
prefix: string or path
All files that the AutoNEB method reads and writes are prefixed with
prefix
n_simul: int
The number of relaxations run in parallel.
n_max: int
The number of images along the NEB path when done.
This number includes the two end-points.
Important: due to the dynamic adding of images around the peak n_max
must be updated if the NEB is restarted.
climb: boolean
Should a CI-NEB calculation be done at the top-point
fmax: float or list of floats
The maximum force along the NEB path
maxsteps: int
The maximum number of steps in each NEB relaxation.
If a list is given the first number of steps is used in the build-up
and final scan phase;
the second number of steps is used in the CI step after all images
have been inserted.
k: float
The spring constant along the NEB path
method: str (see neb.py)
Choice between three methods:
'aseneb', standard ase NEB implementation
'improvedtangent', published NEB implementation
'eb', full spring force implementation (default)
optimizer: object
Optimizer object, defaults to FIRE
Use of the valid strings 'BFGS' and 'FIRE' is deprecated.
space_energy_ratio: float
The preference for new images to be added in a big energy gap
with a preference around the peak or in the biggest geometric gap.
A space_energy_ratio set to 1 will only consider geometric gaps
while one set to 0 will result in only images for energy
resolution.
The AutoNEB method uses a fixed file-naming convention.
The initial images should have the naming prefix000.traj, prefix001.traj,
... up until the final image in prefix00N.traj
Images are dynamically added in between the first and last image until
n_max images have been reached.
"""
def __init__(self, attach_calculators, prefix, n_simul, n_max,
iter_folder='iterations',
fmax=0.025, maxsteps=10000, k=0.1, climb=True, method='eb',
optimizer='FIRE',
remove_rotation_and_translation=False, space_energy_ratio=0.5,
world=None,
parallel=True, smooth_curve=False, interpolate_method='idpp'):
self.attach_calculators = attach_calculators
self.prefix = Path(prefix)
self.n_simul = n_simul
self.n_max = n_max
self.climb = climb
self.all_images = []
self.parallel = parallel
self.maxsteps = maxsteps
self.fmax = fmax
self.k = k
self.method = method
self.remove_rotation_and_translation = remove_rotation_and_translation
self.space_energy_ratio = space_energy_ratio
if interpolate_method not in ['idpp', 'linear']:
self.interpolate_method = 'idpp'
print('Interpolation method not implemented.',
'Using the IDPP method.')
else:
self.interpolate_method = interpolate_method
if world is None:
world = mpi.world
self.world = world
self.smooth_curve = smooth_curve
if isinstance(optimizer, str):
try:
self.optimizer = {
'BFGS': BFGS, 'FIRE': FIRE, 'NEB': NEBOptimizer}[optimizer]
except KeyError:
raise Exception('Optimizer needs to be BFGS, FIRE or NEB')
else:
self.optimizer = optimizer
self.iter_folder = Path(self.prefix) / iter_folder
self.iter_folder.mkdir(exist_ok=True)
def execute_one_neb(self, n_cur, to_run, climb=False, many_steps=False):
with ExitStack() as exitstack:
self._execute_one_neb(exitstack, n_cur, to_run,
climb=climb, many_steps=many_steps)
def iter_trajpath(self, i, iiter):
"""When doing the i'th NEB optimization a set of files
prefixXXXiter00i.traj exists with XXX ranging from 000 to the N images
currently in the NEB."""
(self.iter_folder / f'iter_{iiter}').mkdir(exist_ok=True)
return self.iter_folder / f'iter_{iiter}' / f'i{i:03d}iter{iiter:03d}.traj'
def _execute_one_neb(self, exitstack, n_cur, to_run,
climb=False, many_steps=False):
'''Internal method which executes one NEB optimization.'''
closelater = exitstack.enter_context
self.iteration += 1
# First we copy around all the images we are not using in this
# neb (for reproducibility purposes)
if self.world.rank == 0:
for i in range(n_cur):
if i not in to_run[1: -1]:
filename = self.prefix / f'{i:03d}.traj'
with Trajectory(filename, mode='w',
atoms=self.all_images[i]) as traj:
traj.write()
filename_ref = self.iter_trajpath(i, self.iteration)
if os.path.isfile(filename):
shutil.copy2(filename, filename_ref)
if self.world.rank == 0:
print('Now starting iteration %d on ' % self.iteration, to_run)
# Attach calculators to all the images we will include in the NEB
self.attach_calculators([self.all_images[i] for i in to_run], to_run, self.iteration)
neb = NEB([self.all_images[i] for i in to_run],
k=[self.k[i] for i in to_run[0:-1]],
method=self.method,
parallel=self.parallel,
remove_rotation_and_translation=self
.remove_rotation_and_translation,
climb=climb)
# Do the actual NEB calculation
logpath = (self.iter_folder / f'iter_{self.iteration}'
/ f'log_iter{self.iteration:03d}.log')
qn = closelater(self.optimizer(neb, logfile=logpath))
# Find the ranks which are masters for each their calculation
if self.parallel:
nneb = to_run[0]
nim = len(to_run) - 2
n = self.world.size // nim # number of cpu's per image
j = 1 + self.world.rank // n # my image number
assert nim * n == self.world.size
traj = closelater(Trajectory(
self.prefix / f'{j + nneb:03d}.traj', 'w',
self.all_images[j + nneb],
master=(self.world.rank % n == 0)
))
filename_ref = self.iter_trajpath(j + nneb, self.iteration)
trajhist = closelater(Trajectory(
filename_ref, 'w',
self.all_images[j + nneb],
master=(self.world.rank % n == 0)
))
qn.attach(traj)
qn.attach(trajhist)
else:
num = 1
for i, j in enumerate(to_run[1: -1]):
filename_ref = self.iter_trajpath(j, self.iteration)
trajhist = closelater(Trajectory(
filename_ref, 'w', self.all_images[j]
))
qn.attach(seriel_writer(trajhist, i, num).write)
traj = closelater(Trajectory(
self.prefix / f'{j:03d}.traj', 'w',
self.all_images[j]
))
qn.attach(seriel_writer(traj, i, num).write)
num += 1
if isinstance(self.maxsteps, (list, tuple)) and many_steps:
steps = self.maxsteps[1]
elif isinstance(self.maxsteps, (list, tuple)) and not many_steps:
steps = self.maxsteps[0]
else:
steps = self.maxsteps
if isinstance(self.fmax, (list, tuple)) and many_steps:
fmax = self.fmax[1]
elif isinstance(self.fmax, (list, tuple)) and not many_steps:
fmax = self.fmax[0]
else:
fmax = self.fmax
qn.run(fmax=fmax, steps=steps)
# Remove the calculators and replace them with single
# point calculators and update all the nodes for
# preparation for the next iteration
neb.distribute = types.MethodType(store_E_and_F_in_spc, neb)
neb.distribute()
energies = self.get_energies()
print(f'Energies after iteration {self.iteration}: {energies}')
def run(self):
'''Run the AutoNEB optimization algorithm.'''
n_cur = self.__initialize__()
while len(self.all_images) < self.n_simul + 2:
if isinstance(self.k, (float, int)):
self.k = [self.k] * (len(self.all_images) - 1)
if self.world.rank == 0:
print('Now adding images for initial run')
# Insert a new image where the distance between two images is
# the largest
spring_lengths = []
for j in range(n_cur - 1):
spring_vec = self.all_images[j + 1].get_positions() - \
self.all_images[j].get_positions()
spring_lengths.append(np.linalg.norm(spring_vec))
jmax = np.argmax(spring_lengths)
if self.world.rank == 0:
print('Max length between images is at ', jmax)
# The interpolation used to make initial guesses
# If only the start and end images are supplied, make all images at once
if len(self.all_images) == 2:
n_between = self.n_simul
else:
n_between = 1
toInterpolate = [self.all_images[jmax]]
for i in range(n_between):
toInterpolate += [toInterpolate[0].copy()]
toInterpolate += [self.all_images[jmax + 1]]
neb = NEB(toInterpolate)
neb.interpolate(method=self.interpolate_method)
tmp = self.all_images[:jmax + 1]
tmp += toInterpolate[1:-1]
tmp.extend(self.all_images[jmax + 1:])
self.all_images = tmp
# Expect springs to be in equilibrium
k_tmp = self.k[:jmax]
k_tmp += [self.k[jmax] * (n_between + 1)] * (n_between + 1)
k_tmp.extend(self.k[jmax + 1:])
self.k = k_tmp
# Run the NEB calculation with the new image included
n_cur += n_between
# Determine if any images do not have a valid energy yet
energies = self.get_energies()
n_non_valid_energies = len([e for e in energies if e != e])
if self.world.rank == 0:
print('Start of evaluation of the initial images')
while n_non_valid_energies != 0:
if isinstance(self.k, (float, int)):
self.k = [self.k] * (len(self.all_images) - 1)
# First do one run since some energies are non-determined
to_run, climb_safe = self.which_images_to_run_on()
self.execute_one_neb(n_cur, to_run, climb=False)
energies = self.get_energies()
n_non_valid_energies = len([e for e in energies if e != e])
if self.world.rank == 0:
print('Finished initialisation phase.')
# Then add one image at a time until we have n_max images
while n_cur < self.n_max:
if isinstance(self.k, (float, int)):
self.k = [self.k] * (len(self.all_images) - 1)
# Insert a new image where the distance between two images
# is the largest OR where a higher energy resolution is needed
if self.world.rank == 0:
print('****Now adding another image until n_max is reached',
'({0}/{1})****'.format(n_cur, self.n_max))
spring_lengths = []
for j in range(n_cur - 1):
spring_vec = self.all_images[j + 1].get_positions() - \
self.all_images[j].get_positions()
spring_lengths.append(np.linalg.norm(spring_vec))
total_vec = self.all_images[0].get_positions() - \
self.all_images[-1].get_positions()
tl = np.linalg.norm(total_vec)
fR = max(spring_lengths) / tl
e = self.get_energies()
ed = []
emin = min(e)
enorm = max(e) - emin
for j in range(n_cur - 1):
delta_E = (e[j + 1] - e[j]) * (e[j + 1] + e[j] - 2 *
emin) / 2 / enorm
ed.append(abs(delta_E))
gR = max(ed) / enorm
if fR / gR > self.space_energy_ratio:
jmax = np.argmax(spring_lengths)
t = 'spring length!'
else:
jmax = np.argmax(ed)
t = 'energy difference between neighbours!'
if self.world.rank == 0:
print('Adding image between {0} and'.format(jmax),
'{0}. New image point is selected'.format(jmax + 1),
'on the basis of the biggest ' + t)
for i in range(n_cur):
if i <= jmax:
folder_from = self.iter_folder / f'iter_{self.iteration}' / f'{i}'
folder_to = self.iter_folder / f'iter_{self.iteration + 1}' / f'{i}'
else:
folder_from = self.iter_folder / f'iter_{self.iteration}' / f'{i}'
folder_to = self.iter_folder / f'iter_{self.iteration + 1}' / f'{i + 1}'
(self.iter_folder / f'iter_{self.iteration + 1}').mkdir(exist_ok=True)
shutil.copytree(folder_from, folder_to, dirs_exist_ok=True)
toInterpolate = [self.all_images[jmax]]
toInterpolate += [toInterpolate[0].copy()]
toInterpolate += [self.all_images[jmax + 1]]
neb = NEB(toInterpolate)
neb.interpolate(method=self.interpolate_method)
tmp = self.all_images[:jmax + 1]
tmp += toInterpolate[1:-1]
tmp.extend(self.all_images[jmax + 1:])
self.all_images = tmp
# Expect springs to be in equilibrium
k_tmp = self.k[:jmax]
k_tmp += [self.k[jmax] * 2] * 2
k_tmp.extend(self.k[jmax + 1:])
self.k = k_tmp
# Run the NEB calculation with the new image included
n_cur += 1
to_run, climb_safe = self.which_images_to_run_on()
self.execute_one_neb(n_cur, to_run, climb=False)
if self.world.rank == 0:
print('n_max images has been reached')
# Do a single climb around the top-point if requested
if self.climb:
if isinstance(self.k, (float, int)):
self.k = [self.k] * (len(self.all_images) - 1)
if self.world.rank == 0:
print('****Now doing the CI-NEB calculation****')
for i in range(n_cur):
folder_from = self.iter_folder / f'iter_{self.iteration}' / f'{i}'
folder_to = self.iter_folder / f'iter_{self.iteration + 1}' / f'{i}'
(self.iter_folder / f'iter_{self.iteration + 1}').mkdir(exist_ok=True)
shutil.copytree(folder_from, folder_to, dirs_exist_ok=True)
highest_energy_index = self.get_highest_energy_index()
nneb = highest_energy_index - 1 - self.n_simul // 2
nneb = max(nneb, 0)
nneb = min(nneb, n_cur - self.n_simul - 2)
to_run = list(range(nneb, nneb + self.n_simul + 2))
self.execute_one_neb(n_cur, to_run, climb=True, many_steps=True)
if not self.smooth_curve:
return self.all_images
# If a smooth_curve is requested, adjust the springs to follow two
# Gaussian distributions
e = self.get_energies()
peak = self.get_highest_energy_index()
k_max = 10
d1 = np.linalg.norm(self.all_images[peak].get_positions() -
self.all_images[0].get_positions())
d2 = np.linalg.norm(self.all_images[peak].get_positions() -
self.all_images[-1].get_positions())
l1 = -d1 ** 2 / log(0.2)
l2 = -d2 ** 2 / log(0.2)
x1 = []
x2 = []
for i in range(peak):
v = (self.all_images[i].get_positions() +
self.all_images[i + 1].get_positions()) / 2 - \
self.all_images[0].get_positions()
x1.append(np.linalg.norm(v))
for i in range(peak, len(self.all_images) - 1):
v = (self.all_images[i].get_positions() +
self.all_images[i + 1].get_positions()) / 2 - \
self.all_images[0].get_positions()
x2.append(np.linalg.norm(v))
k_tmp = []
for x in x1:
k_tmp.append(k_max * exp(-((x - d1) ** 2) / l1))
for x in x2:
k_tmp.append(k_max * exp(-((x - d1) ** 2) / l2))
self.k = k_tmp
# Roll back to start from the top-point
if self.world.rank == 0:
print('Now moving from top to start')
for i in range(n_cur):
folder_from = self.iter_folder / f'iter_{self.iteration}' / f'{i}'
folder_to = self.iter_folder / f'iter_{self.iteration + 1}' / f'{i}'
(self.iter_folder / f'iter_{self.iteration + 1}').mkdir(exist_ok=True)
shutil.copytree(folder_from, folder_to, dirs_exist_ok=True)
highest_energy_index = self.get_highest_energy_index()
nneb = highest_energy_index - self.n_simul - 1
while nneb >= 0:
self.execute_one_neb(n_cur, range(nneb, nneb + self.n_simul + 2),
climb=False)
nneb -= 1
# Roll forward from the top-point until the end
nneb = self.get_highest_energy_index()
if self.world.rank == 0:
print('Now moving from top to end')
while nneb <= self.n_max - self.n_simul - 2:
self.execute_one_neb(n_cur, range(nneb, nneb + self.n_simul + 2),
climb=False)
nneb += 1
energies = self.get_energies()
print(f'Energies after iteration {self.iteration}: {energies}')
return self.all_images
def __initialize__(self):
'''Load files from the filesystem.'''
if not os.path.isfile(self.prefix / '000.traj'):
raise IOError(f'No file with name {self.prefix / "000.traj"} '
'was found. Should contain the initial image')
# Find the images that exist
index_exists = [i for i in range(self.n_max) if
os.path.isfile(self.prefix / f'{i:03d}.traj')]
print(f'Traj files with the following indexes were initially found: {index_exists}')
n_cur = index_exists[-1] + 1
if self.world.rank == 0:
print('The NEB initially has %d images ' % len(index_exists),
'(including the end-points)')
if len(index_exists) == 1:
raise Exception('Only a start point exists')
for i in range(len(index_exists)):
if i != index_exists[i]:
raise Exception('Files must be ordered sequentially',
'without gaps.')
if self.world.rank == 0:
for i in index_exists:
filename_ref = self.iter_trajpath(i, 0)
if os.path.isfile(filename_ref):
try:
os.rename(filename_ref, str(filename_ref) + '.bak')
except IOError:
pass
filename = self.prefix / f'{i:03d}.traj'
try:
shutil.copy2(filename, filename_ref)
except IOError:
pass
# Wait until the file system on all nodes is synchronized
self.world.barrier()
# And now lets read in the configurations
for i in range(n_cur):
if i in index_exists:
filename = self.prefix / f'{i:03d}.traj'
newim = read(filename)
self.all_images.append(newim)
else:
self.all_images.append(self.all_images[0].copy())
self.iteration = 0
return n_cur
def get_energies(self):
"""Utility method to extract all energies and insert np.NaN at
invalid images."""
energies = []
for a in self.all_images:
try:
energies.append(a.get_potential_energy())
except RuntimeError:
energies.append(np.NaN)
return energies
def get_energies_one_image(self, image):
"""Utility method to extract energy of an image and return np.NaN
if invalid."""
try:
energy = image.get_potential_energy()
except RuntimeError:
energy = np.NaN
return energy
def get_highest_energy_index(self):
"""Find the index of the image with the highest energy."""
energies = self.get_energies()
valid_entries = [(i, e) for i, e in enumerate(energies) if e == e]
highest_energy_index = max(valid_entries, key=lambda x: x[1])[0]
return highest_energy_index
def which_images_to_run_on(self):
"""Determine which set of images to do a NEB at.
The priority is to first include all images without valid energies,
secondly include the highest energy image."""
n_cur = len(self.all_images)
energies = self.get_energies()
# Find out which image is the first one missing the energy and
# which is the last one missing the energy
first_missing = n_cur
last_missing = 0
n_missing = 0
for i in range(1, n_cur - 1):
if energies[i] != energies[i]:
n_missing += 1
first_missing = min(first_missing, i)
last_missing = max(last_missing, i)
# If all images missing the energy
if last_missing - first_missing + 1 == n_cur - 2:
return list(range(0, n_cur)), False
# Other options
else:
highest_energy_index = self.get_highest_energy_index()
nneb = highest_energy_index - 1 - self.n_simul // 2
nneb = max(nneb, 0)
nneb = min(nneb, n_cur - self.n_simul - 2)
nneb = min(nneb, first_missing - 1)
nneb = max(nneb + self.n_simul, last_missing) - self.n_simul
to_use = list(range(nneb, nneb + self.n_simul + 2))
while self.get_energies_one_image(self.all_images[to_use[0]]) != \
self.get_energies_one_image(self.all_images[to_use[0]]):
to_use[0] -= 1
while self.get_energies_one_image(self.all_images[to_use[-1]]) != \
self.get_energies_one_image(self.all_images[to_use[-1]]):
to_use[-1] += 1
return to_use, (highest_energy_index in to_use[1: -1])
class seriel_writer:
def __init__(self, traj, i, num):
self.traj = traj
self.i = i
self.num = num
def write(self):
if self.num % (self.i + 1) == 0:
self.traj.write()
def store_E_and_F_in_spc(self):
"""Collect the energies and forces on all nodes and store as
single point calculators"""
# Make sure energies and forces are known on all nodes
self.get_forces()
images = self.images
if self.parallel:
energy = np.empty(1)
forces = np.empty((self.natoms, 3))
for i in range(1, self.nimages - 1):
# Determine which node is the leading for image i
root = (i - 1) * self.world.size // (self.nimages - 2)
# If on this node, extract the calculated numbers
if self.world.rank == root:
forces = images[i].get_forces()
energy[0] = images[i].get_potential_energy()
# Distribute these numbers to other nodes
self.world.broadcast(energy, root)
self.world.broadcast(forces, root)
# On all nodes, remove the calculator, keep only energy
# and force in single point calculator
self.images[i].calc = SinglePointCalculator(
self.images[i],
energy=energy[0],
forces=forces)
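A minimal usage sketch for the modified AutoNEB above, assuming the two relaxed end-points have already been written as neb/000.traj and neb/001.traj and that ASE's EMT calculator is an acceptable stand-in for the real one (the folder name and calculator choice are illustrative):

from ase.calculators.emt import EMT
from echem.neb.autoneb import AutoNEB

def attach_calculators(images, indexes, iteration):
    # this modified AutoNEB also passes the image indexes and the iteration number
    for image in images:
        image.calc = EMT()

autoneb = AutoNEB(attach_calculators,
                  prefix='neb',      # reads/writes neb/000.traj ... neb/00N.traj
                  n_simul=3,         # images relaxed simultaneously
                  n_max=7,           # total images including the end-points
                  fmax=0.05,
                  climb=True,
                  parallel=False)
images = autoneb.run()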

View File

@@ -0,0 +1,219 @@
from __future__ import annotations
import tempfile
import numpy as np
from ase.calculators.calculator import Calculator
from echem.core.constants import Hartree2eV, Angstrom2Bohr, Bohr2Angstrom
from echem.core.useful_funcs import shell
from pathlib import Path
import logging
# Atomistic Simulation Environment (ASE) calculator interface for JDFTx
# See http://jdftx.org for JDFTx and https://wiki.fysik.dtu.dk/ase/ for ASE
# Authors: Deniz Gunceler, Ravishankar Sundararaman
# Modified: Vitaliy Kislenko
class JDFTx(Calculator):
def __init__(self,
path_jdftx_executable: str | Path,
path_rundir: str | Path | None = None,
commands: list[tuple[str, str]] = None,
jdftx_prefix: str = 'jdft',
output_name: str = 'output.out'):
self.logger = logging.getLogger(self.__class__.__name__ + ':')
if isinstance(path_jdftx_executable, str):
self.path_jdftx_executable = Path(path_jdftx_executable)
else:
self.path_jdftx_executable = path_jdftx_executable
if isinstance(path_rundir, str):
self.path_rundir = Path(path_rundir)
elif isinstance(path_rundir, Path):
self.path_rundir = path_rundir
elif path_rundir is None:
self.path_rundir = Path(tempfile.mkdtemp())
else:
raise ValueError(f'path_rundir should be str or Path or None, however you set {path_rundir=}'
f' with type {type(path_rundir)}')
self.jdftx_prefix = jdftx_prefix
self.output_name = output_name
self.dumps = []
self.input = [('dump-name', f'{self.jdftx_prefix}.$VAR'),
('initial-state', f'{self.jdftx_prefix}.$VAR')]
if commands is not None:
for com, val in commands:
if com == 'dump-name':
self.logger.debug(f'{self.path_rundir} You set \'dump-name\' command in commands = \'{val}\', '
f'however it will be replaced with \'{self.jdftx_prefix}.$VAR\'')
elif com == 'initial-state':
self.logger.debug(f'{self.path_rundir} You set \'initial-state\' command in commands = \'{val}\', '
f'however it will be replaced with \'{self.jdftx_prefix}.$VAR\'')
elif com == 'coords-type':
self.logger.debug(f'{self.path_rundir} You set \'coords-type\' command in commands = \'{val}\', '
f'however it will be replaced with \'cartesian\'')
elif com == 'include':
self.logger.debug(f'{self.path_rundir} \'include\' command is not supported, ignore it')
elif com == 'coulomb-interaction':
self.logger.debug(f'{self.path_rundir} \'coulomb-interaction\' command will be replaced in accordance with ase atoms')
elif com == 'dump':
self.addDump(val.split()[0], val.split()[1])
else:
self.addCommand(com, val)
if ('End', 'State') not in self.dumps:
self.addDump("End", "State")
if ('End', 'Forces') not in self.dumps:
self.addDump("End", "Forces")
if ('End', 'Ecomponents') not in self.dumps:
self.addDump("End", "Ecomponents")
# Current results
self.E = None
self.forces = None
# History
self.lastAtoms = None
self.lastInput = None
self.global_step = None
self.logger.debug(f'Successfully initialized JDFTx calculator in \'{self.path_rundir}\'')
def validCommand(self, command) -> bool:
"""Checks whether the input string is a valid jdftx command by comparing to the input template (jdft -t)"""
if type(command) != str:
raise IOError('Please enter a string as the name of the command!\n')
return True
def addCommand(self, cmd, v) -> None:
if not self.validCommand(cmd):
raise IOError(f'{cmd} is not a valid JDFTx command!\n'
'Look at the input file template (jdftx -t) for a list of commands.')
self.input.append((cmd, v))
def addDump(self, when, what) -> None:
self.dumps.append((when, what))
def __readEnergy(self,
filepath: str | Path) -> float:
Efinal = None
for line in open(filepath):
tokens = line.split()
if len(tokens) == 3:
Efinal = float(tokens[2])
if Efinal is None:
raise IOError('Error: Energy not found.')
return Efinal * Hartree2eV # Return energy from final line (Etot, F or G)
def __readForces(self,
filepath: str | Path) -> np.array:
idxMap = {}
symbolList = self.lastAtoms.get_chemical_symbols()
for i, symbol in enumerate(symbolList):
if symbol not in idxMap:
idxMap[symbol] = []
idxMap[symbol].append(i)
forces = [0] * len(symbolList)
for line in open(filepath):
if line.startswith('force '):
tokens = line.split()
idx = idxMap[tokens[1]].pop(0) # tokens[1] is chemical symbol
forces[idx] = [float(word) for word in tokens[2:5]] # tokens[2:5]: force components
if len(forces) == 0:
raise IOError('Error: Forces not found.')
return (Hartree2eV / Bohr2Angstrom) * np.array(forces)
def calculation_required(self, atoms, quantities) -> bool:
if (self.E is None) or (self.forces is None):
return True
if (self.lastAtoms != atoms) or (self.input != self.lastInput):
return True
return False
def get_forces(self, atoms) -> np.array:
if self.calculation_required(atoms, None):
self.update(atoms)
return self.forces
def get_potential_energy(self, atoms, force_consistent=False):
if self.calculation_required(atoms, None):
self.update(atoms)
return self.E
def update(self, atoms):
self.runJDFTx(self.constructInput(atoms))
def runJDFTx(self, inputfile):
""" Runs a JDFTx calculation """
file = open(self.path_rundir / 'input.in', 'w')
file.write(inputfile)
file.close()
if self.global_step is not None:
self.logger.info(f'Step: {self.global_step:2}. Run in {self.path_rundir}')
else:
self.logger.info(f'Run in {self.path_rundir}')
shell(f'cd {self.path_rundir} && srun {self.path_jdftx_executable} -i input.in -o {self.output_name}')
self.E = self.__readEnergy(self.path_rundir / f'{self.jdftx_prefix}.Ecomponents')
if self.global_step is not None:
self.logger.debug(f'Step: {self.global_step}. E = {self.E:.4f}')
else:
self.logger.debug(f'E = {self.E:.4f}')
self.forces = self.__readForces(self.path_rundir / f'{self.jdftx_prefix}.force')
def constructInput(self, atoms) -> str:
"""Constructs a JDFTx input string using the input atoms and the input file arguments (kwargs) in self.input"""
inputfile = ''
lattice = atoms.get_cell() * Angstrom2Bohr
inputfile += 'lattice \\\n'
for i in range(3):
for j in range(3):
inputfile += '%f ' % (lattice[j, i])
if i != 2:
inputfile += '\\'
inputfile += '\n'
inputfile += '\n'
inputfile += "".join(["dump %s %s\n" % (when, what) for when, what in self.dumps])
inputfile += '\n'
for cmd, v in self.input:
inputfile += '%s %s\n' % (cmd, str(v))
coords = [x * Angstrom2Bohr for x in list(atoms.get_positions())]
species = atoms.get_chemical_symbols()
inputfile += '\ncoords-type cartesian\n'
for i in range(len(coords)):
inputfile += 'ion %s %f %f %f \t 1\n' % (species[i], coords[i][0], coords[i][1], coords[i][2])
inputfile += '\ncoulomb-interaction '
pbc = list(atoms.get_pbc())
if sum(pbc) == 3:
inputfile += 'periodic\n'
elif sum(pbc) == 0:
inputfile += 'isolated\n'
elif sum(pbc) == 1:
inputfile += 'wire %i%i%i\n' % (pbc[0], pbc[1], pbc[2])
elif sum(pbc) == 2:
inputfile += 'slab %i%i%i\n' % (not pbc[0], not pbc[1], not pbc[2])
# --- add truncation center:
if sum(pbc) < 3:
center = np.mean(np.array(coords), axis=0)
inputfile += 'coulomb-truncation-embed %g %g %g\n' % tuple(center.tolist())
# Cache this calculation to history
self.lastAtoms = atoms.copy()
self.lastInput = list(self.input)
return inputfile
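A short usage sketch for the calculator above, assuming a JDFTx binary reachable through srun and an H2 molecule in a periodic box (the executable path and the commands list are placeholders, not taken from this repository):

from pathlib import Path
from ase import Atoms
from echem.neb.calculators import JDFTx

Path('h2_run').mkdir(exist_ok=True)       # the calculator writes input.in and dumps here
atoms = Atoms('H2', positions=[[0.0, 0.0, 0.0], [0.0, 0.0, 0.74]],
              cell=[10.0, 10.0, 10.0], pbc=True)
calc = JDFTx('/path/to/jdftx',
             path_rundir='h2_run',
             commands=[('elec-cutoff', '20 100'),
                       ('kpoint-folding', '1 1 1')])
atoms.calc = calc
energy = atoms.get_potential_energy()     # writes input.in, runs JDFTx, parses Ecomponents
forces = atoms.get_forces()               # parsed from the .force dump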

View File

@@ -0,0 +1,587 @@
from __future__ import annotations
from ase.mep.neb import NEB
from ase.optimize.sciopt import OptimizerConvergenceError
from ase.io.trajectory import Trajectory, TrajectoryWriter
from ase.io import read
from echem.neb.calculators import JDFTx
from echem.neb.autoneb import AutoNEB
from echem.io_data.jdftx import Ionpos, Lattice, Input
from echem.core.useful_funcs import shell
from pathlib import Path
import numpy as np
import logging
import os
from typing import Literal, Callable
logging.basicConfig(filename='logfile_NEB.log', filemode='a', level=logging.INFO,
format="%(asctime)s %(levelname)8s %(name)14s %(message)s",
datefmt='%d/%m/%Y %H:%M:%S')
class NEBOptimizer:
def __init__(self,
neb: NEB,
trajectory_filepath: str | Path | None = None,
append_trajectory: bool = True):
self.neb = neb
self.logger = logging.getLogger(self.__class__.__name__ + ':')
self.E_image_first = None
self.E_image_last = None
if trajectory_filepath is not None:
if append_trajectory:
self.trj_writer = TrajectoryWriter(trajectory_filepath, mode='a')
else:
self.trj_writer = TrajectoryWriter(trajectory_filepath, mode='w')
else:
self.trj_writer = None
def converged(self, fmax):
return self.neb.get_residual() <= fmax
def update_positions(self, X):
positions = X.reshape((self.neb.nimages - 2) * self.neb.natoms, 3)
self.neb.set_positions(positions)
def get_forces(self):
return self.neb.get_forces().reshape(-1)
def get_energies(self, first: bool = False, last: bool = False):
if not first and not last:
return [image.calc.E for image in self.neb.images[1:-1]]
elif first and not last:
return [image.calc.E for image in self.neb.images[:-1]]
elif not first and last:
return [image.calc.E for image in self.neb.images[1:]]
elif first and last:
return [image.calc.E for image in self.neb.images]
def dump_trajectory(self):
if self.trj_writer is not None:
for image in self.neb.images:
self.trj_writer.write(image)
def dump_positions_vasp(self, prefix='last_img'):
length = len(str(self.neb.nimages + 1))
for i, image in enumerate(self.neb.images):
image.write(f'{prefix}_{str(i).zfill(length)}.vasp', format='vasp')
def set_step_in_calculators(self, step, first: bool = False, last: bool = False):
if not first and not last:
for image in self.neb.images[1:-1]:
image.calc.global_step = step
elif first and not last:
for image in self.neb.images[:-1]:
image.calc.global_step = step
elif not first and last:
for image in self.neb.images[1:]:
image.calc.global_step = step
elif first and last:
for image in self.neb.images:
image.calc.global_step = step
def run_static(self,
fmax: float = 0.1,
max_steps: int = 100,
alpha: float = 0.02,
dE_max: float = None,
construct_calc_fn: Callable = None):
self.logger.info('Static method of optimization was chosen')
max_new_images_at_step = 1
min_steps_after_insertion = 3
steps_after_insertion = 0
if dE_max is not None:
self.logger.info(f'AutoNEB with max {dE_max} eV difference between images was set')
self.logger.info(f'Initial number of images is {self.neb.nimages}, '
f'including initial and final images')
if max_steps < 1:
raise ValueError('max_steps must be greater than or equal to one')
if dE_max is not None:
self.set_step_in_calculators(0, first=True, last=True)
self.E_image_first = self.neb.images[0].get_potential_energy()
self.E_image_last = self.neb.images[-1].get_potential_energy()
length_step = len(str(max_steps))
#X = self.neb.get_positions().reshape(-1)
for step in range(max_steps):
self.dump_trajectory()
self.dump_positions_vasp(prefix=f'Step-{step}-1-')
if dE_max is not None:
self.set_step_in_calculators(step, first=True, last=True)
else:
self.set_step_in_calculators(step)
F = self.get_forces()
self.logger.info(f'Step: {step:{length_step}}. Energies = '
f'{[np.round(en, 4) for en in self.get_energies()]}')
R = self.neb.get_residual()
if R <= fmax:
self.logger.info(f'Step: {step:{length_step}}. Optimization terminates successfully. Residual R = {R:.4f}')
return True
else:
self.logger.info(f'Step: {step:{length_step}}. Residual R = {R:.4f}')
X = self.neb.get_positions().reshape(-1)
X += alpha * F
self.update_positions(X)
self.dump_positions_vasp(prefix=f'Step:-{step}-2-')
if dE_max is not None:
energies = self.get_energies(first=True, last=True)
self.logger.debug(f'Energies raw: {energies}')
self.logger.info(f'Step: {step:{length_step}}. Energies = '
f'{[np.round(en, 4) for en in energies]}')
diff = np.abs(np.diff(energies))
self.logger.debug(f'diff: {diff}')
idxs = np.where(diff > dE_max)[0]
self.logger.debug(f'Idxs where diff > dE_max: {idxs}')
if len(idxs) > max_new_images_at_step:
idxs = np.flip(np.argsort(diff))[:max_new_images_at_step]
self.logger.debug(f'Images will be added for idxs {idxs} since more than {max_new_images_at_step} '
f'diffs were larger than {dE_max} eV')
if (len(idxs) > 0) and (steps_after_insertion > min_steps_after_insertion):
steps_after_insertion = -1
for idx in reversed(idxs):
self.logger.debug(f'Start working with idx: {idx}')
length_prev = len(str(len(self.neb.images) - 1))
length_new = len(str(len(self.neb.images)))
self.logger.debug(f'{length_prev=} {length_new=}')
tmp_images = [self.neb.images[idx].copy(),
self.neb.images[idx].copy(),
self.neb.images[idx + 1].copy()]
tmp_neb = NEB(tmp_images)
tmp_neb.interpolate()
images_new = self.neb.images.copy()
images_new.insert(idx + 1, tmp_neb.images[1])
energies = self.get_energies(first=True, last=True)
energies.insert(idx + 1, None)
self.neb = NEB(images_new,
k=self.neb.k[0],
climb=self.neb.climb)
self.dump_positions_vasp(prefix=f'Step: {step} 3 ')
zfill_length = len(str(len(self.neb.images)))
for k, image in enumerate(self.neb.images):
self.logger.debug(f'Trying to attach the calc to {k} image '
f'with the length: {len(str(len(self.neb.images)))}')
image.calc = construct_calc_fn(str(k).zfill(zfill_length))
image.calc.E = energies[k]
if length_prev != length_new:
self.logger.debug('Trying to rename due to the change in length')
for i in range(0, self.neb.nimages):
shell(f'mv {str(i).zfill(length_prev)} {str(i).zfill(length_new)}')
self.logger.debug(f'Trying to execute the following command: '
f'mv {str(i).zfill(length_prev)} {str(i + 1).zfill(length_new)}')
self.logger.debug('Trying to rename due to the insertion')
for i in range(len(self.neb.images) - 2, idx, -1):
self.logger.debug(f'{i=}')
self.logger.debug(f'Trying to execute the following command: '
f'mv {str(i).zfill(length_new)} {str(i + 1).zfill(length_new)}')
shell(f'mv {str(i).zfill(length_new)} {str(i + 1).zfill(length_new)}')
self.logger.debug(f'Trying to create the new folder: {str(idx + 1).zfill(length_new)}')
folder = Path(str(idx + 1).zfill(length_new))
folder.mkdir()
self.dump_positions_vasp(prefix=f'Step: {step} 4 ')
steps_after_insertion += 1
self.logger.debug(f'Step after insertion: {steps_after_insertion}')
self.logger.warning(f'convergence was not achieved after max iterations = {max_steps}, '
f'residual R = {R:.4f} > {fmax}')
return False
def run_ode(self,
fmax: float = 0.1,
max_steps: int = 100,
C1: float = 1e-2,
C2: float = 2.0,
extrapolation_scheme: Literal[1, 2, 3] = 3,
h: float | None = None,
h_min: float = 1e-10,
R_max: float = 1e3,
rtol: float = 0.1):
"""
fmax : float
convergence tolerance for residual force
max_steps : int
maximum number of steps
C1 : float
sufficient contraction parameter
C2 : float
residual growth control (Inf means there is no control)
extrapolation_scheme : int
extrapolation style (3 seems the most robust)
h : float
initial step size, if None an estimate is used based on ODE12
h_min : float
minimal allowed step size
R_max: float
terminate if residual exceeds this value
rtol : float
relative tolerance
"""
if max_steps < 2:
raise ValueError('max_steps must be greater than or equal to two')
length = len(str(max_steps))
self.set_step_in_calculators(0)
F = self.get_forces()
self.logger.info(f'Step: {0:{length}}. Energies = {[np.round(en, 4) for en in self.get_energies()]}')
R = self.neb.get_residual() # pick the biggest force
if R >= R_max:
self.logger.info(f'Step: {0:{length}}. Residual {R:.4f} >= R_max {R_max}')
raise OptimizerConvergenceError(f'Step: 0. Residual {R:.4f} >= R_max {R_max}')
else:
self.logger.info(f'Step: {0:{length}}. Residual R = {R:.4f}')
if h is None:
h = 0.5 * rtol ** 0.5 / R # Choose a step size based on that force
h = max(h, h_min) # Make sure the step size is not too small
self.logger.info(f'Step: {0:{length}}. Step size h = {h}')
X = self.neb.get_positions().reshape(-1)
for step in range(1, max_steps):
X_new = X + h * F # Pick a new position
self.update_positions(X_new)
self.set_step_in_calculators(step)
F_new = self.get_forces() # Calculate the new forces at this position
self.logger.info(f'Step: {step:{length}}. Energies = {[np.round(en, 4) for en in self.get_energies()]}')
R_new = self.neb.get_residual()
self.logger.info(f'Step: {step:{length}}. At new coordinates R = {R:.4f} -> R_new = {R_new:.4f}')
e = 0.5 * h * (F_new - F) # Estimate the area under the forces curve
err = np.linalg.norm(e, np.inf) # Error estimate
# Accept step if residual decreases sufficiently and/or error acceptable
condition_1 = R_new <= R * (1 - C1 * h)
condition_2 = R_new <= R * C2
condition_3 = err <= rtol
accept = condition_1 or (condition_2 and condition_3)
self.logger.info(f'Step: {step:{length}}. {"R_new <= R * (1 - C1 * h)":26} \t is {condition_1}')
self.logger.info(f'Step: {step:{length}}. {"R_new <= R * C2":26} is {condition_2}')
self.logger.info(f'Step: {step:{length}}. {"err <= rtol":26} is {condition_3}')
# Pick an extrapolation scheme for the system & find new increment
y = F - F_new
if extrapolation_scheme == 1: # F(xn + h Fp)
h_ls = h * (F @ y) / (y @ y)
elif extrapolation_scheme == 2: # F(Xn + h Fp)
h_ls = h * (F @ F_new) / (F @ y + 1e-10)
elif extrapolation_scheme == 3: # min | F(Xn + h Fp) |
h_ls = h * (F @ y) / (y @ y + 1e-10)
else:
raise ValueError(f'Invalid extrapolation_scheme: {extrapolation_scheme}. Must be 1, 2 or 3')
if np.isnan(h_ls) or h_ls < h_min: # Rejects if increment is too small
h_ls = np.inf
h_err = h * 0.5 * np.sqrt(rtol / err)
if accept:
self.logger.info(f'Step: {step:{length}}. The displacement is accepted')
X = X_new
R = R_new
F = F_new
self.dump_trajectory()
self.dump_positions_vasp()
# We check the residuals again
if self.converged(fmax):
self.logger.info(f"Step: {step:{length}}. Optimization terminates successfully")
return True
if R > R_max:
self.logger.info(f"Step: {step:{length}}. Optimization fails, R = {R:.4f} > R_max = {R_max}")
return False
# Compute a new step size.
# Based on the extrapolation and some other heuristics
h = max(0.25 * h, min(4 * h, h_err, h_ls)) # Limit growth/shrinkage using the error and line-search estimates
self.logger.info(f'Step: {step:{length}}. New step size h = {h}')
else:
self.logger.info(f'Step: {step:{length}}. The displacement is rejected')
h = max(0.1 * h, min(0.25 * h, h_err, h_ls))
self.logger.info(f'Step: {step:{length}}. New step size h = {h}')
if abs(h) < h_min: # abort if step size is too small
self.logger.info(f'Step: {step:{length}}. Stop optimization since step size h = {h} < h_min = {h_min}')
return True
self.logger.warning(f'Step: {step:{length}}. Convergence was not achieved after max iterations = {max_steps}')
return True
class NEB_JDFTx:
def __init__(self,
path_jdftx_executable: str | Path,
nimages: int = 5,
input_filepath: str | Path = 'input.in',
output_name: str = 'output.out',
input_format: Literal['jdftx', 'vasp'] = 'jdftx',
cNEB: bool = True,
spring_constant: float = 5.0,
interpolation_method: Literal['linear', 'idpp'] = 'idpp',
restart: Literal[False, 'from_traj', 'from_vasp'] = False,
dE_max: float = None):
if isinstance(path_jdftx_executable, str):
self.path_jdftx_executable = Path(path_jdftx_executable)
else:
self.path_jdftx_executable = path_jdftx_executable
if isinstance(input_filepath, str):
input_filepath = Path(input_filepath)
self.jdftx_input = Input.from_file(input_filepath)
self.nimages = nimages
self.path_rundir = Path.cwd()
self.output_name = output_name
self.input_format = input_format.lower()
self.cNEB = cNEB
self.restart = restart
self.spring_constant = spring_constant
self.interpolation_method = interpolation_method.lower()
self.dE_max = dE_max
self.optimizer = None
self.logger = logging.getLogger(self.__class__.__name__ + ':')
def prepare(self):
length = len(str(self.nimages + 1))
if self.restart is False:
if self.input_format == 'jdftx':
init_ionpos = Ionpos.from_file('init.ionpos')
init_lattice = Lattice.from_file('init.lattice')
final_ionpos = Ionpos.from_file('final.ionpos')
final_lattice = Lattice.from_file('final.lattice')
init_poscar = init_ionpos.convert('vasp', init_lattice)
init_poscar.to_file('init.vasp')
final_poscar = final_ionpos.convert('vasp', final_lattice)
final_poscar.to_file('final.vasp')
initial = read('init.vasp', format='vasp')
final = read('final.vasp', format='vasp')
images = [initial]
images += [initial.copy() for _ in range(self.nimages)]
images += [final]
neb = NEB(images,
k=self.spring_constant,
climb=self.cNEB)
neb.interpolate(method=self.interpolation_method)
for i, image in enumerate(images):
image.write(f'start_img_{str(i).zfill(length)}.vasp', format='vasp')
else:
images = []
if self.restart == 'from_traj':
trj = Trajectory('NEB_trajectory.traj')
n_iter = int(len(trj) / (self.nimages + 2))
for i in range(self.nimages + 2):
trj[(n_iter - 1) * (self.nimages + 2) + i].write(f'start_img_{str(i).zfill(length)}.vasp',
format='vasp')
trj.close()
if self.restart == 'from_traj' or self.restart == 'from_vasp':
for i in range(self.nimages + 2):
img = read(f'start_img_{str(i).zfill(length)}.vasp', format='vasp')
images.append(img)
else:
raise ValueError(f'restart must be False or \'from_traj\', '
f'or \'from_vasp\' but you set {self.restart=}')
neb = NEB(images,
k=self.spring_constant,
climb=self.cNEB)
for i in range(self.nimages):
folder = Path(str(i+1).zfill(length))
folder.mkdir(exist_ok=True)
if self.dE_max is not None:
self.logger.debug(f'Trying to create the folder {str(0).zfill(length)}')
folder = Path(str(0).zfill(length))
folder.mkdir(exist_ok=True)
self.logger.debug(f'Trying to create the folder {str(self.nimages + 1).zfill(length)}')
folder = Path(str(self.nimages + 1).zfill(length))
folder.mkdir(exist_ok=True)
for i, image in enumerate(images[1:-1]):
image.calc = JDFTx(self.path_jdftx_executable,
path_rundir=self.path_rundir / str(i+1).zfill(length),
commands=self.jdftx_input.commands)
if self.dE_max is not None:
images[0].calc = JDFTx(self.path_jdftx_executable,
path_rundir=self.path_rundir / str(0).zfill(length),
commands=self.jdftx_input.commands)
images[-1].calc = JDFTx(self.path_jdftx_executable,
path_rundir=self.path_rundir / str(self.nimages + 1).zfill(length),
commands=self.jdftx_input.commands)
self.optimizer = NEBOptimizer(neb=neb,
trajectory_filepath='NEB_trajectory.traj')
def run(self,
fmax: float = 0.1,
method: Literal['ode', 'static'] = 'ode',
max_steps: int = 100,
**kwargs):
self.prepare()
if self.dE_max is not None:
def calc_fn(folder_name) -> JDFTx:
return JDFTx(self.path_jdftx_executable,
path_rundir=self.path_rundir / folder_name,
commands=self.jdftx_input.commands)
else:
calc_fn = None
if method == 'ode':
self.optimizer.run_ode(fmax, max_steps)
elif method == 'static':
self.optimizer.run_static(fmax, max_steps, dE_max=self.dE_max, construct_calc_fn=calc_fn)
else:
raise ValueError(f'Method must be ode or static but you set {method=}')
class AutoNEB_JDFTx:
"""
Class for running AutoNEB with JDFTx calculator
Parameters:
prefix: string or Path
path to folder with initial files. Basically could be os.getcwd()
In this folder required:
1) init.vasp file with initial configuration
2) final.vasp file with final configuration
3) 'in' file with JDFTx calculation parameters
path_jdftx_executable: string or Path
path to jdftx executable
n_start: int
Starting number of images between starting and final for NEB
n_max: int
Maximum number of images, including starting and final
climb: boolean
Whether it is necessary to use cNEB or not
fmax: float or list of floats
The maximum force along the NEB path
maxsteps: int
The maximum number of steps in each NEB relaxation.
If a list is given the first number of steps is used in the build-up
and final scan phase;
the second number of steps is used in the CI step after all images
have been inserted.
k: float
The spring constant along the NEB path
method: str (see neb.py)
Choice between three methods:
'aseneb', standard ase NEB implementation
'improvedtangent', published NEB implementation
'eb', full spring force implementation (default)
optimizer: str or object
Set optimizer for NEB: FIRE, BFGS or NEB
space_energy_ratio: float
The preference for new images to be added in a big energy gap
with a preference around the peak or in the biggest geometric gap.
A space_energy_ratio set to 1 will only consider geometric gaps
while one set to 0 will result in only images for energy
resolution.
interpolation_method: string
method for interpolation
smooth_curve: boolean
"""
def __init__(self,
prefix,
path_jdftx_executable,
n_start=3,
n_simul=3,
n_max=10,
climb=True,
fmax=0.05,
maxsteps=100,
k=0.1,
restart=False,
method='eb',
optimizer='FIRE',
space_energy_ratio=0.5,
interpolation_method='idpp',
smooth_curve=False):
self.restart = restart
self.path_jdftx_executable = path_jdftx_executable
self.prefix = Path(prefix)
self.n_start = n_start
self.n_max = n_max
self.commands = Input.from_file(Path(prefix) / 'in').commands
self.interpolation_method = interpolation_method
self.autoneb = AutoNEB(self.attach_calculators,
prefix=prefix,
n_simul=n_simul,
n_max=n_max,
climb=climb,
fmax=fmax,
maxsteps=maxsteps,
k=k,
method=method,
space_energy_ratio=space_energy_ratio,
world=None, parallel=False, smooth_curve=smooth_curve,
interpolate_method=interpolation_method, optimizer=optimizer)
def prepare(self):
if not self.restart:
initial = read(self.prefix / 'init.vasp', format='vasp')
final = read(self.prefix / 'final.vasp', format='vasp')
images = [initial]
if self.n_start != 0:
images += [initial.copy() for _ in range(self.n_start)]
images += [final]
if self.n_start != 0:
neb = NEB(images)
neb.interpolate(method=self.interpolation_method)
for i, image in enumerate(images):
image.write(self.prefix / f'{i:03d}.traj', format='traj')
image.write(self.prefix / f'{i:03d}.vasp', format='vasp')
else:
index_exists = [i for i in range(self.n_max) if
os.path.isfile(self.prefix / f'{i:03d}.traj')]
for i in index_exists:
image = Trajectory(self.prefix / f'{i:03d}.traj')
image[-1].write(self.prefix / f'{i:03d}.vasp', format='vasp')
img = read(self.prefix / f'{i:03d}.vasp', format='vasp')
img.write(self.prefix / f'{i:03d}.traj', format='traj')
def attach_calculators(self, images, indexes, iteration):
for image, index in zip(images, indexes):
path_rundir = self.autoneb.iter_folder / f'iter_{iteration}' / str(index)
path_rundir.mkdir(exist_ok=True)
image.calc = JDFTx(self.path_jdftx_executable,
path_rundir=path_rundir,
commands=self.commands)
def run(self):
self.prepare()
self.autoneb.run()
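A minimal run-script sketch for the classes above, assuming the start and end geometries (init.vasp / final.vasp) and a JDFTx input.in already sit in the working directory; the import path of this module and the executable path are placeholders:

from echem.neb.neb import NEB_JDFTx   # hypothetical module path for this file

neb = NEB_JDFTx(path_jdftx_executable='/path/to/jdftx',
                nimages=5,
                input_filepath='input.in',
                input_format='vasp',          # init.vasp / final.vasp are read directly
                cNEB=True,
                spring_constant=5.0,
                interpolation_method='idpp',
                restart=False)
neb.run(fmax=0.1, method='ode', max_steps=100)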

View File

@@ -0,0 +1,701 @@
from __future__ import annotations
from pathlib import Path
from typing_extensions import Required, NotRequired, TypedDict
from echem.io_data.jdftx import VolumetricData, Output, Lattice, Ionpos, Eigenvals, Fillings, kPts, DOS
from echem.io_data.ddec import Output_DDEC
from echem.io_data.bader import ACF
from echem.core.constants import Hartree2eV, eV2Hartree, Bohr2Angstrom, Angstrom2Bohr, \
Bader_radii_Bohr, IDSCRF_radii_Angstrom
from echem.core.electronic_structure import EBS
from monty.re import regrep
from subprocess import Popen, PIPE
from timeit import default_timer as timer
from datetime import timedelta
import matplotlib.pyplot as plt
import shutil
import re
import numpy as np
from nptyping import NDArray, Shape, Number
from typing import Literal
from tqdm.autonotebook import tqdm
from termcolor import colored
from threading import Lock
from concurrent.futures import ThreadPoolExecutor
class System(TypedDict):
substrate: str
adsorbate: str
idx: int
output: Output | None
nac_ddec: Output_DDEC | None
output_phonons: Output | None
dos: EBS | None
nac_bader: ACF | None
excluded_volumes: dict[Literal['cavity', 'molecule', 'free'], float] | None
class DDEC_params(TypedDict):
path_atomic_densities: Required[str]
path_ddec_executable: NotRequired[str]
input_filename: NotRequired[str]
periodicity: NotRequired[tuple[bool]]
charge_type: NotRequired[str]
compute_BOs: NotRequired[bool]
number_of_core_electrons: NotRequired[list[list[int]]]
class InfoExtractor:
def __init__(self,
ddec_params: DDEC_params = None,
path_bader_executable: Path | str = None,
path_arvo_executable: Path | str = None,
systems: list[System] = None,
output_name: str = 'output.out',
jdftx_prefix: str = 'jdft',
do_ddec: bool = False,
do_bader: bool = False):
if ddec_params is not None:
if do_ddec and 'path_ddec_executable' not in ddec_params:
raise ValueError('"path_ddec_executable" must be specified in ddec_params if do_ddec=True')
elif do_ddec:
raise ValueError('"ddec_params" mist be specified if do_ddec=True')
if systems is None:
self.systems = []
else:
self.systems = systems
if isinstance(path_bader_executable, str):
path_bader_executable = Path(path_bader_executable)
if isinstance(path_arvo_executable, str):
path_arvo_executable = Path(path_arvo_executable)
self.output_name = output_name
self.jdftx_prefix = jdftx_prefix
self.do_ddec = do_ddec
self.ddec_params = ddec_params
self.do_bader = do_bader
self.path_bader_executable = path_bader_executable
self.path_arvo_executable = path_arvo_executable
self.lock = Lock()
def create_job_control(self,
filepath: str | Path,
charge: float,
ddec_params: DDEC_params):
if isinstance(filepath, str):
filepath = Path(filepath)
if 'path_atomic_densities' in ddec_params:
path_atomic_densities = ddec_params['path_atomic_densities']
else:
raise ValueError('"path_atomic_densities" must be specified in ddec_params dict')
if 'input_filename' in ddec_params:
input_filename = ddec_params['input_filename']
else:
input_filename = None
if 'periodicity' in ddec_params:
periodicity = ddec_params['periodicity']
else:
periodicity = (True, True, True)
if 'charge_type' in ddec_params:
charge_type = ddec_params['charge_type']
else:
charge_type = 'DDEC6'
if 'compute_BOs' in ddec_params:
compute_BOs = ddec_params['compute_BOs']
else:
compute_BOs = True
if 'number_of_core_electrons' in ddec_params:
number_of_core_electrons = ddec_params['number_of_core_electrons']
else:
number_of_core_electrons = None
job_control = open(filepath, 'w')
job_control.write('<net charge>\n')
job_control.write(f'{charge}\n')
job_control.write('</net charge>\n\n')
job_control.write('<atomic densities directory complete path>\n')
job_control.write(path_atomic_densities + '\n')
job_control.write('</atomic densities directory complete path>\n\n')
if input_filename is not None:
job_control.write('<input filename>\n')
job_control.write(input_filename + '\n')
job_control.write('</input filename>\n\n')
job_control.write('<periodicity along A, B, and C vectors>\n')
for p in periodicity:
if p:
job_control.write('.true.\n')
else:
job_control.write('.false.\n')
job_control.write('</periodicity along A, B, and C vectors>\n\n')
job_control.write('<charge type>\n')
job_control.write(charge_type + '\n')
job_control.write('</charge type>\n\n')
job_control.write('<compute BOs>\n')
if compute_BOs:
job_control.write('.true.\n')
else:
job_control.write('.false.\n')
job_control.write('</compute BOs>\n')
if number_of_core_electrons is not None:
job_control.write('<number of core electrons>\n')
for i in number_of_core_electrons:
job_control.write(f'{i[0]} {i[1]}\n')
job_control.write('</number of core electrons>\n')
job_control.close()
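A hedged usage sketch of the method above (the atomic-densities path is a placeholder); with the defaults it writes a job_control.txt of roughly the shape shown in the comments:

extractor = InfoExtractor(ddec_params={'path_atomic_densities': '/path/to/atomic_densities/'})
extractor.create_job_control('job_control.txt', charge=0.0,
                             ddec_params=extractor.ddec_params)
# job_control.txt then starts with:
# <net charge>
# 0.0
# </net charge>
#
# <atomic densities directory complete path>
# /path/to/atomic_densities/
# </atomic densities directory complete path>
# ... followed by the periodicity, charge type and compute BOs blocks.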
def check_out_outvib_sameness(self):
for system in self.systems:
if system['output'] is not None and system['output_phonons'] is not None:
if not system['output'].structure == system['output_phonons'].structure:
print(colored('System:', color='red'),
colored(' '.join((system['substrate'], system['adsorbate'], str(system['idx']))),
color='red', attrs=['bold']),
colored('has output and phonon output for different systems'))
def extract_info_multiple(self,
path_root_folder: str | Path,
recreate_files: dict[Literal['bader', 'ddec', 'cars', 'cubes', 'volumes'], bool] = None,
num_workers: int = 1,
parse_folders_names=True) -> None:
if isinstance(path_root_folder, str):
path_root_folder = Path(path_root_folder)
subfolders = [f for f in path_root_folder.rglob('*') if f.is_dir()]
depth = max([len(f.parents) for f in subfolders])
subfolders = [f for f in subfolders if len(f.parents) == depth]
with tqdm(total=len(subfolders)) as pbar:
with ThreadPoolExecutor(num_workers) as executor:
for _ in executor.map(self.extract_info, subfolders, [recreate_files] * len(subfolders), \
[parse_folders_names] * len(subfolders)):
pbar.update()
def extract_info(self,
path_root_folder: str | Path,
recreate_files: dict[Literal['bader', 'ddec', 'cars', 'cubes', 'volumes'], bool] = None,
parse_folders_names: bool = True) -> None:
if isinstance(path_root_folder, str):
path_root_folder = Path(path_root_folder)
if recreate_files is None:
recreate_files = {'bader': False, 'ddec': False, 'cars': False, 'cubes': False, 'volumes': False}
else:
if 'bader' not in recreate_files:
recreate_files['bader'] = False
if 'ddec' not in recreate_files:
recreate_files['ddec'] = False
if 'cars' not in recreate_files:
recreate_files['cars'] = False
if 'cubes' not in recreate_files:
recreate_files['cubes'] = False
if 'volumes' not in recreate_files:
recreate_files['volumes'] = False
files = [file.name for file in path_root_folder.iterdir() if file.is_file()]
if parse_folders_names:
substrate, adsorbate, idx, *_ = path_root_folder.name.split('_')
idx = int(idx)
if 'vib' in _:
is_vib_folder = True
else:
is_vib_folder = False
if 'bad' in _:
return None
if is_vib_folder:
output_phonons = Output.from_file(path_root_folder / self.output_name)
if (output_phonons.phonons['zero'] is not None and any(output_phonons.phonons['zero'] > 1e-5)) or \
(output_phonons.phonons['imag'] is not None and any(
np.abs(output_phonons.phonons['imag']) > 1e-5)):
print(colored(str(path_root_folder), color='yellow', attrs=['bold']))
if output_phonons.phonons['zero'] is not None:
string = '['
for i in output_phonons.phonons['zero']:
if i.imag != 0:
string += str(i.real) + '+' + colored(str(i.imag) + 'j', color='yellow',
attrs=['bold']) + ', '
else:
string += str(i.real) + '+' + str(i.imag) + 'j, '
string = string[:-2]
string += ']'
print(f'\t{len(output_phonons.phonons["zero"])} zero modes: {string}')
if output_phonons.phonons['imag'] is not None:
string = '['
for i in output_phonons.phonons['imag']:
if i.imag != 0:
string += str(i.real) + '+' + colored(str(i.imag) + 'j', color='yellow',
attrs=['bold']) + ', '
else:
string += str(i.real) + '+' + str(i.imag) + 'j, '
string = string[:-2]
string += ']'
print(f'\t{len(output_phonons.phonons["imag"])} imag modes: {string}')
output = None
else:
output = Output.from_file(path_root_folder / self.output_name)
output_phonons = None
else:
is_vib_folder = False
output = Output.from_file(path_root_folder / self.output_name)
if not is_vib_folder:
if 'POSCAR' not in files or recreate_files['cars']:
print('Create POSCAR for\t\t\t\t\t', colored(str(path_root_folder), attrs=['bold']))
poscar = output.get_poscar()
poscar.to_file(path_root_folder / 'POSCAR')
if 'CONTCAR' not in files or recreate_files['cars']:
print('Create CONTCAR for\t\t\t\t\t', colored(str(path_root_folder), attrs=['bold']))
contcar = output.get_contcar()
contcar.to_file(path_root_folder / 'CONTCAR')
if 'XDATCAR' not in files or recreate_files['cars']:
print('Create XDATCAR for\t\t\t\t\t', colored(str(path_root_folder), attrs=['bold']))
xdatcar = output.get_xdatcar()
xdatcar.to_file(path_root_folder / 'XDATCAR')
fft_box_size = output.fft_box_size
if 'output_volumetric.out' in files:
files.remove('output_volumetric.out')
patterns = {'fft_box_size': r'Chosen fftbox size, S = \[(\s+\d+\s+\d+\s+\d+\s+)\]'}
matches = regrep(str(path_root_folder / 'output_volumetric.out'), patterns)
fft_box_size = np.array([int(i) for i in matches['fft_box_size'][0][0][0].split()])
if 'valence_density.cube' not in files or recreate_files['cubes']:
print('Create valence(spin)_density for\t', colored(str(path_root_folder), attrs=['bold']))
if f'{self.jdftx_prefix}.n_up' in files and f'{self.jdftx_prefix}.n_dn' in files:
n_up = VolumetricData.from_file(path_root_folder / f'{self.jdftx_prefix}.n_up',
fft_box_size,
output.structure).convert_to_cube()
n_dn = VolumetricData.from_file(path_root_folder / f'{self.jdftx_prefix}.n_dn',
fft_box_size,
output.structure).convert_to_cube()
n = n_up + n_dn
n.to_file(path_root_folder / 'valence_density.cube')
valence_density_exist = True
if output.magnetization_abs > 1e-2:
n = n_up - n_dn
n.to_file(path_root_folder / 'spin__density.cube')
elif f'{self.jdftx_prefix}.n' in files:
n = VolumetricData.from_file(path_root_folder / f'{self.jdftx_prefix}.n',
fft_box_size,
output.structure).convert_to_cube()
n.to_file(path_root_folder / 'valence_density.cube')
valence_density_exist = True
else:
print(colored('(!) There is no files for valence(spin)_density.cube creation',
color='red', attrs=['bold']))
valence_density_exist = False
else:
valence_density_exist = True
if ('nbound.cube' not in files or recreate_files['cubes']) and f'{self.jdftx_prefix}.nbound' in files:
print('Create nbound.cube for\t\t\t\t', colored(str(path_root_folder), attrs=['bold']))
nbound = VolumetricData.from_file(path_root_folder / f'{self.jdftx_prefix}.nbound',
fft_box_size, output.structure).convert_to_cube()
nbound.to_file(path_root_folder / 'nbound.cube')
for file in files:
if file.startswith(f'{self.jdftx_prefix}.fluidN_'):
fluid_type = file.removeprefix(self.jdftx_prefix + '.')
if f'{fluid_type}.cube' not in files or recreate_files['cubes']:
print(f'Create {fluid_type}.cube for\t', colored(str(path_root_folder), attrs=['bold']))
fluidN = VolumetricData.from_file(path_root_folder / file,
fft_box_size,
output.structure).convert_to_cube()
fluidN.to_file(path_root_folder / (fluid_type + '.cube'))
if self.ddec_params is not None and ('job_control.txt' not in files or recreate_files['ddec']):
print('Create job_control.txt for\t\t\t', colored(str(path_root_folder), attrs=['bold']))
charge = - (output.nelec_hist[-1] - output.nelec_pzc)
self.create_job_control(filepath=path_root_folder / 'job_control.txt',
charge=charge,
ddec_params=self.ddec_params)
if 'ACF.dat' in files and not recreate_files['bader']:
nac_bader = ACF.from_file(path_root_folder / 'ACF.dat')
nac_bader.nelec_per_isolated_atom = np.array([output.pseudopots[key] for key in
output.structure.species])
elif self.do_bader and valence_density_exist:
print('Run Bader for\t\t\t\t\t\t', colored(str(path_root_folder), attrs=['bold']))
if parse_folders_names:
string = str(path_root_folder.name.split('_')[1])
print_com = ''
if string != 'Pristine':
print_com += ' -o atoms'
length = len(re.findall(r'[A-Z]', string))
ints = [int(i) for i in re.findall(r'[2-9]', re.sub(r'minus\d+.\d+|plus\d+.\d+', '', string))]
length += sum(ints) - len(ints)
while length > 0:
print_com += f' -i {output.structure.natoms + 1 - length}'
length -= 1
spin_com = ''
if f'{self.jdftx_prefix}.n_up' in files and \
f'{self.jdftx_prefix}.n_dn' in files and \
output.magnetization_abs > 1e-2:
spin_com = ' -s ' + str(path_root_folder / 'spin__density.cube')
else:
spin_com = ''
print_com = ''
com = str(self.path_bader_executable) + ' -t cube' + \
print_com + spin_com + ' ' + str(path_root_folder / 'valence_density.cube')
p = Popen(com, cwd=path_root_folder)
p.wait()
nac_bader = ACF.from_file(path_root_folder / 'ACF.dat')
nac_bader.nelec_per_isolated_atom = np.array([output.pseudopots[key] for key in
output.structure.species])
else:
nac_bader = None
if not recreate_files['ddec'] and 'valence_cube_DDEC_analysis.output' in files:
nac_ddec = Output_DDEC.from_file(path_root_folder / 'valence_cube_DDEC_analysis.output')
elif self.ddec_params is not None and self.do_ddec and valence_density_exist:
print('Run DDEC for\t\t\t\t\t\t', colored(str(path_root_folder), attrs=['bold']))
start = timer()
p = Popen(str(self.ddec_params['path_ddec_executable']), stdin=PIPE, bufsize=0)
p.communicate(str(path_root_folder).encode('ascii'))
end = timer()
print(f'DDEC Finished! Elapsed time: {str(timedelta(seconds=end-start)).split(".")[0]}',
colored(str(path_root_folder), attrs=['bold']))
nac_ddec = Output_DDEC.from_file(path_root_folder / 'valence_cube_DDEC_analysis.output')
else:
nac_ddec = None
if f'{self.jdftx_prefix}.eigenvals' in files and f'{self.jdftx_prefix}.kPts' in files:
eigs = Eigenvals.from_file(path_root_folder / f'{self.jdftx_prefix}.eigenvals',
output)
kpts = kPts.from_file(path_root_folder / f'{self.jdftx_prefix}.kPts')
if f'{self.jdftx_prefix}.fillings' in files:
occs = Fillings.from_file(path_root_folder / f'{self.jdftx_prefix}.fillings',
output).occupations
else:
occs = None
dos = DOS(eigenvalues=eigs.eigenvalues * Hartree2eV,
weights=kpts.weights,
efermi=output.mu * Hartree2eV,
occupations=occs)
else:
dos = None
if 'output_phonon.out' in files:
output_phonons = Output.from_file(path_root_folder / 'output_phonon.out')
if (output_phonons.phonons['zero'] is not None and any(output_phonons.phonons['zero'] > 1e-5)) or \
(output_phonons.phonons['imag'] is not None and any(
np.abs(output_phonons.phonons['imag']) > 1e-5)):
print(colored(str(path_root_folder), color='yellow', attrs=['bold']))
if output_phonons.phonons['zero'] is not None:
string = '['
for i in output_phonons.phonons['zero']:
if i.imag != 0:
string += str(i.real) + '+' + colored(str(i.imag) + 'j', color='yellow',
attrs=['bold']) + ', '
else:
string += str(i.real) + '+' + str(i.imag) + 'j, '
string = string[:-2]
string += ']'
print(f'\t{len(output_phonons.phonons["zero"])} zero modes: {string}')
if output_phonons.phonons['imag'] is not None:
string = '['
for i in output_phonons.phonons['imag']:
if i.imag != 0:
string += str(i.real) + '+' + colored(str(i.imag) + 'j', color='yellow',
attrs=['bold']) + ', '
else:
string += str(i.real) + '+' + str(i.imag) + 'j, '
string = string[:-2]
string += ']'
print(f'\t{len(output_phonons.phonons["imag"])} imag modes: {string}')
if parse_folders_names and substrate == 'Mol':
if 'bader.ats' not in files or recreate_files['volumes']:
with open(path_root_folder / 'bader.ats', 'w') as file:
    for name, coord in zip(output.structure.species, output.structure.coords):
        file.write(f' {coord[0]} {coord[1]} {coord[2]} {Bader_radii_Bohr[name] * Bohr2Angstrom}\n')
if 'idscrf.ats' not in files or recreate_files['volumes']:
with open(path_root_folder / 'idscrf.ats', 'w') as file:
    for name, coord in zip(output.structure.species, output.structure.coords):
        file.write(f' {coord[0]} {coord[1]} {coord[2]} {IDSCRF_radii_Angstrom[name]}\n')
if 'arvo.bader.log' not in files or recreate_files['volumes']:
print('Run ARVO.bader for\t\t\t\t\t', colored(str(path_root_folder), attrs=['bold']))
com = str(self.path_arvo_executable) + ' protein=bader.ats log=arvo.bader.log'
p = Popen(com, cwd=path_root_folder)
p.wait()
if 'arvo.idscrf.log' not in files or recreate_files['volumes']:
print('Run ARVO.idscrf for\t\t\t\t\t', colored(str(path_root_folder), attrs=['bold']))
com = str(self.path_arvo_executable) + ' protein=idscrf.ats log=arvo.idscrf.log'
p = Popen(com, cwd=path_root_folder)
p.wait()
excluded_volumes = {}
with open(path_root_folder / 'arvo.bader.log') as file:
    excluded_volumes['molecule'] = float(file.readline().split()[1])
with open(path_root_folder / 'arvo.idscrf.log') as file:
    excluded_volumes['cavity'] = float(file.readline().split()[1])
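# Assumed interpretation of the 'free' volume below: cube of the difference between the cavity and
# molecule edge-length scales (cube roots of the respective volumes)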
excluded_volumes['free'] = (excluded_volumes['cavity']**(1/3) - excluded_volumes['molecule']**(1/3))**3
else:
excluded_volumes = None
else:
nac_ddec = None
nac_bader = None
dos = None
excluded_volumes = None
if parse_folders_names:
self.lock.acquire()
system_proccessed = self.get_system(substrate, adsorbate, idx)
if len(system_proccessed) == 1:
if output_phonons is not None:
system_proccessed[0]['output_phonons'] = output_phonons
else:
system_proccessed[0]['output'] = output
system_proccessed[0]['nac_ddec'] = nac_ddec
system_proccessed[0]['dos'] = dos
system_proccessed[0]['nac_bader'] = nac_bader
system_proccessed[0]['excluded_volumes'] = excluded_volumes
self.lock.release()
elif len(system_proccessed) == 0:
system: System = {'substrate': substrate,
'adsorbate': adsorbate,
'idx': idx,
'output': output,
'nac_ddec': nac_ddec,
'output_phonons': output_phonons,
'dos': dos,
'nac_bader': nac_bader,
'excluded_volumes': excluded_volumes}
self.systems.append(system)
self.lock.release()
else:
self.lock.release()
raise ValueError(f'There should be 0 or 1 copy of the system in the InfoExtractor. '
                 f'However, there are {len(system_proccessed)} copies of the following system: '
                 f'{substrate=}, {adsorbate=}, {idx=}')
def get_system(self, substrate: str, adsorbate: str, idx: int = None) -> list[System]:
if idx is None:
return [system for system in self.systems if
system['substrate'] == substrate and system['adsorbate'] == adsorbate]
else:
return [system for system in self.systems if
system['substrate'] == substrate and system['adsorbate'] == adsorbate and system['idx'] == idx]
def get_F(self, substrate: str, adsorbate: str, idx: int,
units: Literal['eV', 'Ha'] = 'eV',
T: float | int = None) -> float:
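# Final F (total free energy) from the last ionic step, in the requested units; if T is given, the
# vibrational free-energy correction from the phonon run is added (assumed convention).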
if T is None:
E = self.get_system(substrate, adsorbate, idx)[0]['output'].energy_ionic_hist['F'][-1]
if units == 'Ha':
return E
elif units == 'eV':
return E * Hartree2eV
else:
raise ValueError(f'units must be "Ha" or "eV", but "{units}" was given')
elif isinstance(T, float | int):
E = self.get_system(substrate, adsorbate, idx)[0]['output'].energy_ionic_hist['F'][-1]
E_vib = self.get_Gibbs_vib(substrate, adsorbate, idx, T)
if units == 'Ha':
return E + E_vib * eV2Hartree
elif units == 'eV':
return E * Hartree2eV + E_vib
else:
raise ValueError(f'units must be "Ha" or "eV", but "{units}" was given')
else:
raise ValueError(f'T should be None, float or int, but {type(T)} was given')
def get_G(self, substrate: str, adsorbate: str, idx: int,
units: Literal['eV', 'Ha'] = 'eV',
T: float | int = None) -> float:
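# Final G (grand free energy) from the last ionic step, in the requested units; if T is given, the
# vibrational free-energy correction from the phonon run is added (assumed convention).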
if T is None:
E = self.get_system(substrate, adsorbate, idx)[0]['output'].energy_ionic_hist['G'][-1]
if units == 'Ha':
return E
elif units == 'eV':
return E * Hartree2eV
else:
raise ValueError(f'units must be "Ha" or "eV", but "{units}" was given')
elif isinstance(T, float | int):
E = self.get_system(substrate, adsorbate, idx)[0]['output'].energy_ionic_hist['G'][-1]
E_vib = self.get_Gibbs_vib(substrate, adsorbate, idx, T)
if units == 'Ha':
return E + E_vib * eV2Hartree
elif units == 'eV':
return E * Hartree2eV + E_vib
else:
raise ValueError(f'units must be "Ha" or "eV", but "{units}" was given')
else:
raise ValueError(f'T should be None, float or int, but {type(T)} was given')
def get_N(self, substrate: str, adsorbate: str, idx: int) -> float:
return self.get_system(substrate, adsorbate, idx)[0]['output'].nelec
def get_mu(self, substrate: str, adsorbate: str, idx: int) -> float:
return self.get_system(substrate, adsorbate, idx)[0]['output'].mu
def get_Gibbs_vib(self, substrate: str, adsorbate: str, idx: int, T: float) -> float:
return self.get_system(substrate, adsorbate, idx)[0]['output_phonons'].thermal_props.get_Gibbs_vib(T)
def plot_energy(self, substrate: str, adsorbate: str):
systems = self.get_system(substrate, adsorbate)
energy_min = min(system['output'].energy for system in systems)
n_rows = int(np.ceil(len(systems) / 3))
fig, axs = plt.subplots(n_rows, 3, figsize=(25, 5 * n_rows), dpi=180)
fig.subplots_adjust(wspace=0.3, hspace=0.2)
for system, ax_e in zip(systems, axs.flatten()):
out = system['output']
delta_ionic_energy = (out.energy_ionic_hist['F'] - out.energy_ionic_hist['F'][-1]) * Hartree2eV
if (delta_ionic_energy < 0).any():
energy_modulus_F = True
delta_ionic_energy = np.abs(delta_ionic_energy)
else:
energy_modulus_F = False
ax_e.plot(range(out.nisteps), delta_ionic_energy, color='r', label=r'$\Delta F$', ms=3, marker='o')
if 'G' in out.energy_ionic_hist.keys():
delta_ionic_energy = (out.energy_ionic_hist['G'] - out.energy_ionic_hist['G'][-1]) * Hartree2eV
if (delta_ionic_energy < 0).any():
energy_modulus_G = True
delta_ionic_energy = np.abs(delta_ionic_energy)
else:
energy_modulus_G = False
else:
energy_modulus_G = None
ax_e.plot(range(out.nisteps), delta_ionic_energy, color='orange', label=r'$\Delta G$', ms=3, marker='o')
ax_e.set_yscale('log')
ax_e.set_xlabel(r'$Step$', fontsize=12)
if energy_modulus_F:
ylabel = r'$|\Delta F|, \ $'
else:
ylabel = r'$\Delta F, \ $'
if energy_modulus_G is not None:
if energy_modulus_G:
ylabel += r'$|\Delta G|, \ $'
else:
ylabel += r'$\Delta G, \ $'
ylabel += r'$eV$'
ax_e.set_ylabel(ylabel, color='r', fontsize=14)
ax_e.legend(loc='upper right', fontsize=14)
delta_E = (out.energy - energy_min) * Hartree2eV
if np.abs(delta_E) < 1e-8:
ax_e.text(0.5, 0.9, rf'$\mathbf{{E_f - E_f^{{min}} = {np.round(delta_E, 2)} \ eV}}$', ha='center', va='center', transform=ax_e.transAxes, fontsize=12)
ax_e.set_title(rf'$\mathbf{{ {substrate} \ {adsorbate} \ {system["idx"]} }}$', fontsize=13, y=1, pad=-15)
else:
ax_e.text(0.5, 0.9, rf'$E_f - E_f^{{min}} = {np.round(delta_E, 2)} \ eV$', ha='center', va='center', transform=ax_e.transAxes, fontsize=12)
ax_e.set_title(rf'${substrate} \ {adsorbate} \ {system["idx"]}$', fontsize=13, y=1, pad=-15)
ax_f = ax_e.twinx()
ax_f.plot(range(len(out.get_forces())), out.get_forces() * Hartree2eV / (Bohr2Angstrom ** 2), color='g', label=r'$\left< |\vec{F}| \right>$', ms=3, marker='o')
ax_f.set_ylabel(r'$Average \ Force, \ eV / \AA^3$', color='g', fontsize=13)
ax_f.legend(loc='upper right', bbox_to_anchor=(1, 0.8), fontsize=13)
ax_f.set_yscale('log')
def create_z_displacements(folder_source: str | Path,
folder_result: str | Path,
n_atoms_mol: int,
scan_range: NDArray[Shape['Nsteps'], Number] | list[float],
create_flat_surface: bool = False,
folder_files_to_copy: str | Path = None) -> None:
"""
Create folder with all necessary files for displacing the selected atoms along z-axis
Args:
folder_source: path for the folder with .lattice and .ionpos JDFTx files that will be initial files for configurations
folder_result: path for the folder where all final files will be saved
n_atoms_mol: number of atoms that should be displaced. All atoms must be in the end atom list in .ionpos
scan_range: array with displacement (in angstroms) for the selected atoms
create_flat_surface: if True all atoms will be projected into graphene surface; if False all atoms except molecules remain at initial positions
folder_files_to_copy: path for the folder with input.in and run.sh files to copy into each folder with final configurations
"""
if isinstance(folder_source, str):
folder_source = Path(folder_source)
if isinstance(folder_result, str):
folder_result = Path(folder_result)
if isinstance(folder_files_to_copy, str):
folder_files_to_copy = Path(folder_files_to_copy)
substrate, adsorbate, idx, *_ = folder_source.name.split('_')
lattice = Lattice.from_file(folder_source / 'jdft.lattice')
for d_ang in scan_range:
d_ang = np.round(d_ang, 2)
d_bohr = d_ang * Angstrom2Bohr
ionpos = Ionpos.from_file(folder_source / 'jdft.ionpos')
Path(folder_result / f'{substrate}_{adsorbate}_{idx}/{d_ang}').mkdir(parents=True, exist_ok=True)
ionpos.coords[-n_atoms_mol:, 2] += d_bohr
idx_surf = [i for i, coord in enumerate(ionpos.coords) if np.abs(coord[0]) < 1 or np.abs(coord[1]) < 1]
z_carbon = np.mean(ionpos.coords[idx_surf], axis=0)[2]
if create_flat_surface:
ionpos.coords[:-n_atoms_mol, 2] = z_carbon
else:
ionpos.coords[idx_surf, 2] = z_carbon
ionpos.move_scale[-n_atoms_mol:] = 0
ionpos.move_scale[idx_surf] = 0
ionpos.to_file(folder_result / f'{substrate}_{adsorbate}_{idx}/{d_ang}/jdft.ionpos')
lattice.to_file(folder_result / f'{substrate}_{adsorbate}_{idx}/{d_ang}/jdft.lattice')
poscar = ionpos.convert('vasp', lattice)
poscar.to_file(folder_result / f'{substrate}_{adsorbate}_{idx}/{d_ang}/POSCAR')
if folder_files_to_copy is not None:
    shutil.copyfile(folder_files_to_copy / 'input.in', folder_result / f'{substrate}_{adsorbate}_{idx}/{d_ang}/input.in')
    shutil.copyfile(folder_files_to_copy / 'run.sh', folder_result / f'{substrate}_{adsorbate}_{idx}/{d_ang}/run.sh')
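# A minimal usage sketch for create_z_displacements; the paths, molecule size and scan values below are
# illustrative assumptions, not taken from this repository:
#
#     create_z_displacements(folder_source='relaxed/Gr_CO2_0',
#                            folder_result='z_scan',
#                            n_atoms_mol=3,
#                            scan_range=np.arange(-0.5, 2.01, 0.25),
#                            create_flat_surface=False,
#                            folder_files_to_copy='templates')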

0
logfile_NEB.log Normal file
View File

82
neb.py Executable file
View File

@@ -0,0 +1,82 @@
#!/usr/bin/env python3
"""
NEB path generator for VASP
Usage: python neb.py -n 5 00/POSCAR 0N/POSCAR
"""
import numpy as np
import os
import sys
import argparse
from ase.io import read, write
from ase.neb import NEB
def create_interpolated_images(initial, final, n_images, method='linear'):
    """Create n_images intermediate NEB images between the initial and final structures."""
    # Endpoints plus n_images copies of the initial structure; NEB.interpolate() fills in the path
    images = [initial] + [initial.copy() for _ in range(n_images)] + [final]
    neb = NEB(images)
    # 'idpp' refines the path with the image-dependent pair potential; anything else stays linear
    if method == 'idpp':
        neb.interpolate(method='idpp')
    else:
        neb.interpolate()
    return images
def main():
parser = argparse.ArgumentParser(description='Generate NEB images for VASP')
parser.add_argument('-n', '--n_images', type=int, default=3,
help='Number of intermediate images')
parser.add_argument('initial_poscar', help='Path to initial POSCAR')
parser.add_argument('final_poscar', help='Path to final POSCAR')
parser.add_argument('--method', default='idpp',
help='Interpolation method (idpp or linear)')
args = parser.parse_args()
# Read structures
initial = read(args.initial_poscar, format='vasp')
final = read(args.final_poscar, format='vasp')
print(f"Initial structure: {len(initial)} atoms")
print(f"Final structure: {len(final)} atoms")
# Check atom count
if len(initial) != len(final):
print("ERROR: Initial and final structures must have same number of atoms!")
sys.exit(1)
# Create images
images = create_interpolated_images(initial, final, args.n_images, method=args.method)
# Create directories and write POSCAR files
for i, image in enumerate(images):
dir_name = f"{i:02d}"
os.makedirs(dir_name, exist_ok=True)
write(f"{dir_name}/POSCAR", image, format='vasp', direct=True)
print(f"Created {dir_name}/POSCAR")
# Write NEB information
with open('NEB_INFO.txt', 'w') as f:
f.write(f"NEB calculation with {len(images)} images\n")
f.write(f"Initial: {args.initial_poscar}\n")
f.write(f"Final: {args.final_poscar}\n")
f.write(f"Intermediate images: {args.n_images}\n")
print(f"\nCreated {len(images)} images in directories 00-{len(images)-1:02d}")
print("Next steps:")
print("1. Copy INCAR, KPOINTS, POTCAR to all image directories")
print("2. Run VASP in the main directory")
print("3. Check convergence in OUTCAR files")
if __name__ == '__main__':
main()
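# Example invocation (the final-image path below is an assumption about the local layout):
#     python neb.py -n 5 --method idpp 00/POSCAR 06/POSCAR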

22
prepare_neb.sh Executable file
View File

@@ -0,0 +1,22 @@
#!/bin/bash
# Prepare NEB calculation
# Number of intermediate images
N_IMAGES=5
# Create images using neb.py
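# Here 0N stands for the directory holding the final-endpoint POSCAR (e.g. 06 when N_IMAGES=5); adjust to your layout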
python electrochemistry/echem/neb/neb.py -n $N_IMAGES 00/POSCAR 0N/POSCAR
# Copy common files to all image directories
for i in $(seq 0 $((N_IMAGES+1))); do
dir=$(printf "%02d" $i)
if [ -d "$dir" ]; then
cp INCAR $dir/
cp KPOINTS $dir/
cp POTCAR $dir/
echo "Copied files to $dir/"
fi
done
echo "Preparation complete!"
#echo "To run NEB: mpirun -np 24 vasp_std"