This commit is contained in:
2026-01-08 19:47:32 +03:00
commit 4d7676a79e
89 changed files with 62260 additions and 0 deletions

View File

@@ -0,0 +1,52 @@
from __future__ import annotations
import numpy as np
from pathlib import Path
from nptyping import NDArray, Shape, Number
class ACF:
def __init__(self,
coords: NDArray[Shape['Natoms, 3'], Number],
nelec_per_atom: NDArray[Shape['Natoms'], Number],
spin_per_atom: NDArray[Shape['Natoms'], Number] | None = None):
self.coords = coords
self.nelec_per_atom = nelec_per_atom
self.nelec_per_isolated_atom = None
self.spin_per_atom = spin_per_atom
@staticmethod
def from_file(filepath: str | Path):
if isinstance(filepath, str):
filepath = Path(filepath)
file = open(filepath)
file.readline()
line = file.readline()
ncols = len(line.split('+'))
if '+' in line:
if ncols == 7:
data = np.genfromtxt(filepath, skip_header=2, skip_footer=5, delimiter='|')
return ACF(data[:, 1:4], data[:, 4])
elif ncols == 8:
data = np.genfromtxt(filepath, skip_header=2, skip_footer=7, delimiter='|')
return ACF(data[:, 1:4], data[:, 4], data[:, 5])
else:
raise IOError(f'Can parse ACF.dat with 7 or 8 columns, but {ncols=} was given')
else:
data = np.genfromtxt(filepath, skip_header=2, skip_footer=4)
return ACF(data[:, 1:4], data[:, 4])
def get_charge(self,
nelec_per_isolated_atom: NDArray[Shape['Natoms'], Number] | None = None):
if nelec_per_isolated_atom is not None:
return nelec_per_isolated_atom - self.nelec_per_atom
else:
if self.nelec_per_isolated_atom is not None:
return self.nelec_per_isolated_atom - self.nelec_per_atom
else:
raise ValueError('nelec_per_isolated_atom should be defined either as argument of '
'this function or as self.nelec_per_isolated_atom')
def get_delta_elec(self,
nelec_per_isolated_atom: NDArray[Shape['Natoms'], Number]):
return self.nelec_per_atom - nelec_per_isolated_atom
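# Usage sketch (not part of the original file). Reads a Bader ACF.dat file and computes atomic
# charges; the electron counts of the isolated atoms below are illustrative assumptions.
#   acf = ACF.from_file('ACF.dat')
#   charges = acf.get_charge(nelec_per_isolated_atom=np.array([4.0, 6.0, 6.0]))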

View File

@@ -0,0 +1,233 @@
from matplotlib import colors
import numpy as np
from monty.re import regrep
from echem.core.structure import Structure
from nptyping import NDArray, Shape, Number
from dataclasses import dataclass
from echem.core.constants import ElemNum2Name, Bohr2Angstrom
@dataclass()
class LocalMultipoleMoments:
net_charges: NDArray[Shape['Natoms'], Number]
dipoles: NDArray[Shape['Natoms, 4'], Number]
quadrupoles: NDArray[Shape['Natoms, 8'], Number]
class Output_DDEC:
def __init__(self,
structure: Structure,
lmm_hirshfeld: LocalMultipoleMoments,
lmm_ddec: LocalMultipoleMoments,
charges_cm5: NDArray[Shape['Natoms'], Number]):
self.structure = structure
self.lmm_hirshfeld = lmm_hirshfeld
self.lmm_ddec = lmm_ddec
self.charges_cm5 = charges_cm5
@staticmethod
def _process_lmm_(data, line_number, natoms):
charges_ddec = np.zeros(natoms)
dipoles_ddec = np.zeros((natoms, 4))
quadrupoles_ddec = np.zeros((natoms, 8))
idx = 0
while len(line := data[line_number].split()) != 0:
charges_ddec[idx] = float(line[5])
dipoles_ddec[idx] = list(map(float, line[6: 10]))
quadrupoles_ddec[idx] = list(map(float, line[10:]))
line_number += 1
idx += 1
return LocalMultipoleMoments(charges_ddec, dipoles_ddec, quadrupoles_ddec)
@staticmethod
def from_file(filepath):
file = open(filepath, 'r')
data = file.readlines()
file.close()
patterns = {'lattice': r' parameters',
'lmm': r'Multipole analysis for each of the expansion sites.',
'cm5': r'The computed CM5 net atomic charges are:'}
matches = regrep(filepath, patterns)
lattice = np.zeros((3, 3))
i = matches['lattice'][0][1]
natoms = int((data[i + 1].split()[0]).split('.')[0])
line = data[i + 2].split()
NX = int(line[0].split('.')[0])
lattice[0] = np.array([float(line[1]), float(line[2]), float(line[3])])
line = data[i + 3].split()
NY = int(line[0].split('.')[0])
lattice[1] = np.array([float(line[1]), float(line[2]), float(line[3])])
line = data[i + 4].split()
NZ = int(line[0].split('.')[0])
lattice[2] = np.array([float(line[1]), float(line[2]), float(line[3])])
if NX > 0 and NY > 0 and NZ > 0:
units = 'Bohr'
elif NX < 0 and NY < 0 and NZ < 0:
units = 'Angstrom'
else:
raise ValueError('The numbers of voxels NX, NY, NZ must be either all positive (Bohr) or all negative (Angstrom)')
if units == 'Angstrom':
NX, NY, NZ = -NX, -NY, -NZ
lattice = lattice * np.array([NX, NY, NZ]).reshape((-1, 1)) * Bohr2Angstrom
coords = np.zeros((natoms, 3))
species = []
line_number = matches['lmm'][0][1] + 3
idx = 0
while len(line := data[line_number].split()) != 0:
species.append(ElemNum2Name[int(line[1])])
coords[idx] = list(map(float, line[2:5]))
line_number += 1
idx += 1
structure = Structure(lattice, species, coords)
line_number = matches['lmm'][0][1] + 3
lmm_hirshfeld = Output_DDEC._process_lmm_(data, line_number, natoms)
line_number = matches['lmm'][1][1] + 3
lmm_ddec = Output_DDEC._process_lmm_(data, line_number, natoms)
line_number = matches['cm5'][0][1] + 1
charges_cm5 = []
i = 0
while i < natoms:
charges = list(map(float, data[line_number].split()))
charges_cm5 += charges
line_number += 1
i += len(charges)
return Output_DDEC(structure, lmm_hirshfeld, lmm_ddec, np.array(charges_cm5))
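# Usage sketch (not part of the original file; the file name is an assumption):
#   out = Output_DDEC.from_file('VASP_DDEC_analysis.output')
#   print(out.charges_cm5)              # CM5 net atomic charges
#   print(out.lmm_ddec.net_charges)     # DDEC net atomic charges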
class AtomicNetCharges:
"""Class that operates with DDEC output file DDEC6_even_tempered_net_atomic_charges.xyz"""
def __init__(self, structure: Structure, net_charges, dipoles_xyz=None,
dipoles_mag=None, Qs=None, quadrupole_tensor_eigs=None, date=None):
"""
Create an AtomicNetCharges object.
Args:
structure (Structure class): a base class that contains lattice, coords and species information
net_charges:
dipoles_xyz:
dipoles_mag:
Qs:
quadrupole_tensor_eigs:
"""
self.structure = structure
self.net_charges = net_charges
self.dipoles_xyz = dipoles_xyz
self.dipoles_mag = dipoles_mag
self.Qs = Qs
self.quadrupole_tensor_eigs = quadrupole_tensor_eigs
self.date = date
@staticmethod
def from_file(filepath):
"""
Read the positions of atoms and their charges
from file "DDEC6_even_tempered_net_atomic_charges.xyz"
Parameters:
----------
filepath: str
Path to file with atomic charges
Returns:
-------
AtomicNetCharges class instance
"""
file = open(filepath, 'r')
data = file.readlines()
file.close()
patterns = {'date': r'\s+(\d\d\d\d/\d\d/\d\d\s+\d\d:\d\d:\d\d)'}
matches = regrep(filepath, patterns)
date = matches['date'][0][0][0]
natoms = int(data[0])
x_axis = data[1].split()[10:13]
y_axis = data[1].split()[15:18]
z_axis = data[1].split()[20:23]
lattice = np.array([x_axis, y_axis, z_axis], dtype=np.float32)
for start_line, string in enumerate(data):
if 'The following XYZ coordinates are in angstroms' in string:
break
coords = np.zeros((natoms, 3))
species = []
net_charges = np.zeros(natoms)
dipoles_xyz = np.zeros((natoms, 3))
dipoles_mag = np.zeros(natoms)
Qs = np.zeros((natoms, 5))
quadrupole_tensor_eigs = np.zeros((natoms, 3))
for i, j in enumerate(range(start_line + 2, start_line + 2 + natoms)):
line_splitted = data[j].split()
species.append(line_splitted[1])
coords[i] = line_splitted[2:5]
net_charges[i] = line_splitted[5]
dipoles_xyz[i] = line_splitted[6:9]
dipoles_mag[i] = line_splitted[9]
Qs[i] = line_splitted[10:15]
quadrupole_tensor_eigs[i] = line_splitted[15:18]
structure = Structure(lattice, species, coords, coords_are_cartesian=True)
return AtomicNetCharges(structure, net_charges, dipoles_xyz, dipoles_mag, Qs, quadrupole_tensor_eigs, date)
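# Usage sketch (not part of the original file):
#   anc = AtomicNetCharges.from_file('DDEC6_even_tempered_net_atomic_charges.xyz')
#   print(anc.net_charges.sum())        # total net charge of the cell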
class AtomicSpinMoments:
"""Class that operates with DDEC output file DDEC6_even_tempered_atomic_spin_moments.xyz"""
def __init__(self, structure: Structure, spin_moments, date):
self.structure = structure
self.spin_moments = spin_moments
self.date = date
@staticmethod
def from_file(filepath):
file = open(filepath, 'r')
data = file.readlines()
file.close()
patterns = {'date': r'\s+(\d\d\d\d/\d\d/\d\d\s+\d\d:\d\d:\d\d)'}
matches = regrep(filepath, patterns)
date = matches['date'][0][0][0]
natoms = int(data[0])
x_axis = data[1].split()[10:13]
y_axis = data[1].split()[15:18]
z_axis = data[1].split()[20:23]
lattice = np.array([x_axis, y_axis, z_axis], dtype=np.float32)
coords = np.zeros((natoms, 3))
species = []
spin_moments = np.zeros(natoms)
for i, j in enumerate(range(2, 2 + natoms)):
line_splitted = data[j].split()
species += [line_splitted[0]]
coords[i] = line_splitted[1:4]
spin_moments[i] = line_splitted[4]
structure = Structure(lattice, species, coords, coords_are_cartesian=True)
return AtomicSpinMoments(structure, spin_moments, date)
class MidpointNormalize(colors.Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
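# Usage sketch (not part of the original file). MidpointNormalize centers a diverging colormap
# at a chosen value (e.g. zero net charge); x, y and charges below are hypothetical arrays.
#   import matplotlib.pyplot as plt
#   norm = MidpointNormalize(vmin=-0.5, vmax=1.0, midpoint=0.0)
#   plt.scatter(x, y, c=charges, cmap='bwr', norm=norm)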

View File

@@ -0,0 +1,114 @@
import numpy as np
from typing import Union, List, Iterable
from monty.re import regrep
import re
from echem.io_data.ddec import AtomicNetCharges
class GasSensor:
def __init__(self):
pass
@staticmethod
def read_OUTCAR(filepath) -> float:
"""
This function reads your OUTCAR file and returns the final total energy (TOTEN) of the system in eV.
"""
file = open(filepath, 'r')
data = file.readlines()
file.close()
patterns = {'energy_ionic': r'free energy\s+TOTEN\s+=\s+(.\d+\.\d+)\s+eV'}
matches = regrep(filepath, patterns)
end_energy = np.array([float(i[0][0]) for i in matches['energy_ionic']])
return end_energy[-1]
@staticmethod
def sort_DDEC_output(filepath, k: int) -> list[int]:
"""
This function sorts the atoms by z coordinate and returns the indices of the k topmost atoms (assumed to belong to the molecule)
k - number of atoms in your molecule
"""
z_coords = {}
idx = 1
with open(filepath, 'r') as file:
while True:
line = file.readline()
if re.search(r'\sChargemol', line) is not None:
break
list_of_substrings = line.split(' ')
if re.search(r'^\s|^j', list_of_substrings[0]) is None:
z_coords[idx] = list_of_substrings[-2]
idx += 1
continue
sorted_z_coords = dict(sorted(z_coords.items(), key=lambda item: float(item[1]), reverse=True))
result = []
counter = 0
for item in sorted_z_coords:
if counter < k:
result.append(item)
counter += 1
else:
break
return result
@staticmethod
def get_chrg_mol(filepath, k: int | list[int]) -> float:
"""
This function returns the total charge of the molecule.
filepath - DDEC6 output file: DDEC6_even_tempered_net_atomic_charges.xyz
k - the number of atoms in the molecule, or a list with the ordinal numbers of the molecule's atoms.
"""
atomic_charges = AtomicNetCharges.from_file(filepath)
net_charges = atomic_charges.net_charges
if isinstance(k, int):
chrg_molecule = 0
targets = GasSensor.sort_DDEC_output(filepath, k)
for i in targets:
chrg_molecule += net_charges[i - 1]
return chrg_molecule
elif isinstance(k, list):
targets = k
chrg_molecule = 0
for i in targets:
chrg_molecule += net_charges[i - 1]
return chrg_molecule
@staticmethod
def get_Ead(filepath, E_surface, E_molecule) -> float:
"""
This function returns the adsorption energy E_ad = E_system - E_surface - E_molecule.
filepath - the OUTCAR file obtained from the VASP optimization of the whole system.
"""
E_system = GasSensor.read_OUTCAR(filepath)
E_ad = E_system - E_surface - E_molecule
#print(E_ad)
return E_ad
@staticmethod
def get_energy_in_meV(filepath):
"""
This function converts energies from eV to meV.
filepath - a .txt file containing energies in eV.
"""
X = np.genfromtxt(filepath)
X_new = X * 1000
return f'Your energies in meV: {X_new}'
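# Usage sketch (not part of the original file; paths, reference energies and k are assumptions):
#   E_ad = GasSensor.get_Ead('OUTCAR', E_surface=-250.0, E_molecule=-14.0)
#   q_mol = GasSensor.get_chrg_mol('DDEC6_even_tempered_net_atomic_charges.xyz', k=3)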

View File

@@ -0,0 +1,227 @@
import os
import re
import shutil
from typing import Callable
from echem.io_data.jdftx import Ionpos, Lattice
from echem.io_data.vasp import Poscar
from echem.core.structure import Structure
from echem.core.constants import THz2eV
from echem.core.thermal_properties import ThermalProperties
from InterPhon.core import PreProcess, PostProcess
from nptyping import NDArray, Shape, Number
from typing import Union
from pathlib import Path
class InterPhonInterface(ThermalProperties):
def __init__(self,
folder_to_jdftx_files: Union[str, Path],
folder_files_to_copy: Union[str, Path] = None,
select_fun: Callable[[Structure], list[list[str]]] = None,
user_args: dict = None,
sym_flag: bool = True):
if isinstance(folder_to_jdftx_files, str):
folder_to_jdftx_files = Path(folder_to_jdftx_files)
if isinstance(folder_files_to_copy, str):
folder_files_to_copy = Path(folder_files_to_copy)
self.folder_to_jdftx_files = folder_to_jdftx_files
self.folder_files_to_copy = folder_files_to_copy
self.select_fun = select_fun
self.user_args = user_args
self.sym_flag = sym_flag
self.post_process = None
self.eigen_freq = None
self.weights = None
def _create_poscar_for_interphon(self,
folder_to_jdftx_files: Path,
select_fun: Callable[[Structure], list[list[str]]] = None) -> None:
"""
Creates a POSCAR with the unit cell for InterPhon and adds selective dynamics data
Args:
folder_to_jdftx_files (str): path to folder with jdft.ionpos and jdft.lattice files
select_fun (Callable, optional): function that takes a Structure as input and returns a list with
selective dynamics data for the Poscar class. By default, all atoms are allowed to move.
"""
ionpos = Ionpos.from_file(folder_to_jdftx_files / 'jdft.ionpos')
lattice = Lattice.from_file(folder_to_jdftx_files / 'jdft.lattice')
poscar = ionpos.convert('vasp', lattice)
if select_fun is not None:
sd_data = select_fun(poscar.structure)
else:
sd_data = [['T', 'T', 'T'] for _ in range(poscar.structure.natoms)]
poscar.sdynamics_data = sd_data
poscar.to_file(folder_to_jdftx_files / 'POSCAR_unitcell_InterPhon')
def _make_preprocess(self,
poscar_unitcell: Path,
folder_to_disps: Path,
folder_files_to_copy: Path = None,
user_args: dict = None,
sym_flag: bool = True) -> None:
"""
Creates folders with POSCARs containing displaced atoms and all other files necessary for the calculations
Args:
poscar_unitcell (str): path to the POSCAR file that contains the unit cell for InterPhon
with defined selective dynamics
folder_to_disps (str): path to a folder where all new folders with corresponding POSCARs with
displaced atoms will be created
folder_files_to_copy (str, optional): path to a folder from which all files will be copied to each
new folder with new POSCARs
user_args (dict, optional): dict with all necessary information for the InterPhon PreProcess class.
Only 2D periodicity is supported. If you want to switch off symmetries, you have to define 'periodicity'
in user_args and set sym_flag=False
Example and default value: user_args = {'dft_code': 'vasp', 'displacement': 0.05,
'enlargement': "1 1 1", 'periodicity': "1 1 0"}
sym_flag (bool, optional): if True the symmetry will be applied. Only 2D symmetries are supported
"""
if user_args is None:
user_args = {'dft_code': 'vasp',
'displacement': 0.05,
'enlargement': '1 1 1',
'periodicity': '1 1 0'}
if poscar_unitcell != folder_to_disps / 'POSCAR_unitcell_InterPhon':
shutil.copyfile(poscar_unitcell, folder_to_disps / 'POSCAR_unitcell_InterPhon')
pre_process = PreProcess()
pre_process.set_user_arg(user_args)
pre_process.set_unit_cell(in_file=str(poscar_unitcell),
code_name='vasp')
pre_process.set_super_cell(out_file=str(folder_to_disps / 'POSCAR_supercell_InterPhon'),
code_name='vasp')
pre_process.write_displace_cell(out_file=str(folder_to_disps / 'POSCAR'),
code_name='vasp',
sym_flag=sym_flag)
poscars_disp = [f for f in folder_to_disps.iterdir() if f.is_file() and bool(re.search(r'POSCAR-\d{4}$',
f.name))]
for poscar_disp in poscars_disp:
poscar = Poscar.from_file(poscar_disp)
ionpos, lattice = poscar.convert('jdftx')
subfolder_to_disp = folder_to_disps / poscar_disp.name[-4:]
if not os.path.isdir(subfolder_to_disp):
os.mkdir(subfolder_to_disp)
ionpos.to_file(subfolder_to_disp / 'jdft.ionpos')
lattice.to_file(subfolder_to_disp / 'jdft.lattice')
shutil.copyfile(folder_to_disps / poscar_disp, subfolder_to_disp / 'POSCAR')
if folder_files_to_copy is not None:
files_to_copy = [f for f in folder_files_to_copy.iterdir() if f.is_file()]
for file in files_to_copy:
shutil.copyfile(file, subfolder_to_disp / file.name)
with open(folder_to_disps / 'user_args_InterPhon', 'w') as file:
for key, value in user_args.items():
file.write(f'{key}: {value}\n')
def _make_postprocess(self,
folder_to_disps: Path,
filepath_unitcell: Path,
filepath_supercell: Path,
filepath_kpoints: Path,
user_args: dict = None,
sym_flag: bool = True) -> None:
"""
Processes the output files after all the calculations with displaced atoms are finished
Args:
folder_to_disps (str): path to the folder that contains all folders with the performed calculations with
atom displacements
filepath_unitcell (str): path to the POSCAR file that contains the unit cell for InterPhon
with defined selective dynamics
filepath_supercell (str): path to the POSCAR file produced by InterPhon with proper enlargement
filepath_kpoints (str): path to the KPOINTS file. The phonons will be evaluated at the given k-points
user_args (dict, optional): dict with all necessary information for the InterPhon PreProcess class.
Example and default value: user_args = {'dft_code': 'vasp', 'displacement': 0.05,
'enlargement': "1 1 1", 'periodicity': "1 1 0"}
sym_flag (bool, optional): if True the symmetry will be applied. Only 2D symmetries are supported
"""
if user_args is None:
user_args = {'dft_code': 'vasp',
'displacement': 0.05,
'enlargement': '1 1 1',
'periodicity': '1 1 0'}
output_paths = [f / 'output.out' for f in folder_to_disps.iterdir()
if f.is_dir() and bool(re.search(r'\d{4}$', f.name))]
post_process = PostProcess(in_file_unit_cell=str(filepath_unitcell),
in_file_super_cell=str(filepath_supercell),
code_name='vasp')
post_process.set_user_arg(user_args)
post_process.set_reciprocal_lattice()
post_process.set_force_constant(force_files=[str(f) for f in output_paths],
code_name='jdftx',
sym_flag=sym_flag)
post_process.set_k_points(k_file=str(filepath_kpoints))
post_process.eval_phonon()
self.post_process = post_process
ThermalProperties.__init__(self, self.post_process.w_q * THz2eV)
def create_displacements_jdftx(self):
self._create_poscar_for_interphon(folder_to_jdftx_files=self.folder_to_jdftx_files,
select_fun=self.select_fun)
self._make_preprocess(poscar_unitcell=self.folder_to_jdftx_files / 'POSCAR_unitcell_InterPhon',
folder_to_disps=self.folder_to_jdftx_files,
folder_files_to_copy=self.folder_files_to_copy,
user_args=self.user_args,
sym_flag=self.sym_flag)
def get_phonons(self) -> NDArray[Shape['Nkpts, Nfreq'], Number]:
if self.post_process is None:
self._make_postprocess(folder_to_disps=self.folder_to_jdftx_files,
filepath_unitcell=self.folder_to_jdftx_files / 'POSCAR_unitcell_InterPhon',
filepath_supercell=self.folder_to_jdftx_files / 'POSCAR_supercell_InterPhon',
filepath_kpoints=self.folder_to_jdftx_files / 'KPOINTS',
user_args=self.user_args,
sym_flag=self.sym_flag)
return self.eigen_freq
def get_Gibbs_ZPE(self) -> float:
if self.eigen_freq is None:
self._make_postprocess(folder_to_disps=self.folder_to_jdftx_files,
filepath_unitcell=self.folder_to_jdftx_files / 'POSCAR_unitcell_InterPhon',
filepath_supercell=self.folder_to_jdftx_files / 'POSCAR_supercell_InterPhon',
filepath_kpoints=self.folder_to_jdftx_files / 'KPOINTS',
user_args=self.user_args,
sym_flag=self.sym_flag)
return ThermalProperties.get_Gibbs_ZPE(self)
def get_enthalpy_vib(self,
T: float) -> float:
if self.eigen_freq is None:
self._make_postprocess(folder_to_disps=self.folder_to_jdftx_files,
filepath_unitcell=self.folder_to_jdftx_files / 'POSCAR_unitcell_InterPhon',
filepath_supercell=self.folder_to_jdftx_files / 'POSCAR_supercell_InterPhon',
filepath_kpoints=self.folder_to_jdftx_files / 'KPOINTS',
user_args=self.user_args,
sym_flag=self.sym_flag)
return ThermalProperties.get_enthalpy_vib(self, T)
def get_TS_vib(self,
T: float) -> float:
if self.eigen_freq is None:
self._make_postprocess(folder_to_disps=self.folder_to_jdftx_files,
filepath_unitcell=self.folder_to_jdftx_files / 'POSCAR_unitcell_InterPhon',
filepath_supercell=self.folder_to_jdftx_files / 'POSCAR_supercell_InterPhon',
filepath_kpoints=self.folder_to_jdftx_files / 'KPOINTS',
user_args=self.user_args,
sym_flag=self.sym_flag)
return ThermalProperties.get_TS_vib(self, T)
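# Usage sketch (not part of the original file; the folder path is an assumption). Typical workflow:
#   interface = InterPhonInterface('path/to/jdftx_calc')
#   interface.create_displacements_jdftx()   # 1) generate displaced structures and JDFTx inputs
#   # ... run JDFTx in every created subfolder, writing output.out ...
#   freqs = interface.get_phonons()          # 2) post-process the finished calculations
#   G_zpe = interface.get_Gibbs_ZPE()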

View File

@@ -0,0 +1,914 @@
from __future__ import annotations
import numpy as np
import re
from monty.re import regrep
from echem.core.structure import Structure
from echem.core.constants import Bohr2Angstrom, Angstrom2Bohr, Hartree2eV, eV2Hartree
from echem.core.ionic_dynamics import IonicDynamics
from echem.core.electronic_structure import EBS
from echem.core.thermal_properties import ThermalProperties
from echem.io_data import vasp
from echem.io_data.universal import Cube
from typing import Union, Literal, TypedDict
from typing_extensions import NotRequired
from pathlib import Path
import warnings
import copy
from nptyping import NDArray, Shape, Number
from termcolor import colored
class Lattice:
def __init__(self,
lattice: NDArray[Shape['3, 3'], Number]):
self.lattice = lattice
@staticmethod
def from_file(filepath: str | Path):
if isinstance(filepath, str):
filepath = Path(filepath)
file = open(filepath, 'r')
data = file.readlines()
file.close()
patterns = {'lattice': r'^\s*lattice\s+'}
matches = regrep(str(filepath), patterns)
lattice = []
i = 0
while len(lattice) < 9:
line = data[matches['lattice'][0][1] + i].split()
for word in line:
try:
word = float(word)
lattice.append(word)
except ValueError:
pass
i += 1
lattice = np.array(lattice).reshape((3, 3))
return Lattice(lattice)
def to_file(self, filepath: str):
file = open(filepath, 'w')
file.write('lattice \\\n')
width_coords_float = max(len(str(int(np.max(self.lattice)))), len(str(int(np.min(self.lattice))))) + 16
for i, vector in enumerate(self.lattice):
file.write('\t')
for vector_i in vector:
file.write(f'{vector_i:{width_coords_float}.15f} ')
if i < 2:
file.write('\\')
file.write('\n')
file.close()
class Ionpos:
def __init__(self,
species: list[str],
coords: NDArray[Shape['Natoms, 3'], Number],
move_scale: list[int] | NDArray[Shape['Natoms'], Number] = None,
constraint_type: list[Literal['HyperPlane', 'Linear', 'None', 'Planar'] | None] = None,
constraint_params: list[list[float] | None] = None):
self.species = species
self.coords = coords
if move_scale is None:
move_scale = np.ones(len(coords), dtype=int)
elif isinstance(move_scale, list):
move_scale = np.array(move_scale, dtype=int)
self.move_scale = move_scale
self.constraint_type = constraint_type
self.constraint_params = constraint_params
@staticmethod
def from_file(filepath: str | Path):
if isinstance(filepath, str):
filepath = Path(filepath)
file = open(filepath, 'r')
data = file.readlines()
file.close()
patterns = {'coords': r'^\s*ion\s+'}
matches = regrep(str(filepath), patterns)
natoms = len(matches['coords'])
species = []
coords = np.zeros((natoms, 3))
move_scale = np.zeros(natoms, dtype=int)
constraint_type = []
constraint_params = []
for i, ion in enumerate(matches['coords']):
line = data[ion[1]].split()
species.append(line[1])
coords[i] = [line[2], line[3], line[4]]
move_scale[i] = line[5]
if len(line) > 6:
constraint_type.append(line[6])
constraint_params.append([float(line[7]), float(line[8]), float(line[9])])
else:
constraint_type.append(None)
constraint_params.append(None)
return Ionpos(species, coords, move_scale, constraint_type, constraint_params)
def to_file(self,
filepath: str | Path) -> None:
if isinstance(filepath, str):
filepath = Path(filepath)
file = open(filepath, 'w')
width_species = max([len(sp) for sp in self.species])
width_coords_float = max(len(str(int(np.max(self.coords)))), len(str(int(np.min(self.coords))))) + 16
if self.constraint_params is None and self.constraint_type is None:
for sp, coord, ms in zip(self.species, self.coords, self.move_scale):
file.write(f'ion {sp:{width_species}} ')
for coord_i in coord:
file.write(f'{coord_i:{width_coords_float}.15f} ')
file.write(f'{ms}\n')
elif self.constraint_params is not None and self.constraint_type is not None:
for sp, coord, ms, ctype, cparams in zip(self.species, self.coords, self.move_scale,
self.constraint_type, self.constraint_params):
file.write(f'ion {sp:{width_species}} ')
for coord_i in coord:
file.write(f'{coord_i:{width_coords_float}.15f} ')
if ctype is None:
file.write(f'{ms}\n')
else:
file.write(f'{ms} ')
file.write(f'{ctype} ')
file.write(f'{cparams[0]} {cparams[1]} {cparams[2]}\n')
else:
raise ValueError('constraint_type and constraint_params must be both specified or both be None')
file.close()
def convert(self,
format: Literal['vasp'], *args):
if format == 'vasp':
lattice = np.transpose(args[0].lattice) * Bohr2Angstrom
return vasp.Poscar(Structure(lattice, self.species, self.coords * Bohr2Angstrom))
else:
raise NotImplementedError('Currently only format="vasp" is supported')
def get_structure(self,
lattice: Lattice) -> Structure:
return Structure(lattice.lattice * Bohr2Angstrom, self.species, self.coords * Bohr2Angstrom)
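# Usage sketch (not part of the original file):
#   ionpos = Ionpos.from_file('jdft.ionpos')
#   lattice = Lattice.from_file('jdft.lattice')
#   poscar = ionpos.convert('vasp', lattice)   # JDFTx (Bohr) -> VASP Poscar (Angstrom)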
class Input:
def __init__(self, commands: list[tuple[str, str]]):
self.commands = commands
@staticmethod
def from_file(filepath: str | Path):
file = open(filepath, 'r')
data = file.readlines()
file.close()
commands = []
to_append = ''
for line in data:
line = line.strip().strip('\n')
if line.endswith('\\'):
to_append += re.sub(r'\s+', ' ', line.strip('\\'))
else:
if len(line) == 0:
continue
to_append += line
line = to_append.split()
if line[0] == 'dump':
for i in line[2:]:
commands.append(('dump', f'{line[1]} {i}'))
else:
commands.append((line[0], ' '.join(line[1:])))
to_append = ''
return Input(commands)
class EnergyIonicHist(TypedDict):
F: NDArray[Shape['Nsteps'], Number]
G: NotRequired[NDArray[Shape['Nsteps'], Number]]
muN: NotRequired[NDArray[Shape['Nsteps'], Number]]
class Output(IonicDynamics):
def __init__(self,
fft_box_size: NDArray[Shape['3'], Number],
energy_ionic_hist: EnergyIonicHist,
coords_hist: NDArray[Shape['Nsteps, Natoms, 3'], Number],
forces_hist: NDArray[Shape['Nsteps, Natoms, 3'], Number] | None,
nelec_hist: NDArray[Shape['Nsteps'], Number],
magnetization_hist: NDArray[Shape['Nsteps, 2'], Number] | None,
structure: Structure,
nbands: int,
nkpts: int,
mu: float | None,
HOMO: float | None,
LUMO: float | None,
phonons: dict[Literal['real', 'imag', 'zero', 'nStates'], np.ndarray | None],
pseudopots: dict,
lowdin: dict[str, float] | None):
super(Output, self).__init__(forces_hist, coords_hist, structure.lattice, True)
self.fft_box_size = fft_box_size
self.energy_ionic_hist = energy_ionic_hist
self.coords_hist = coords_hist
self.nelec_hist = nelec_hist
self.magnetization_hist = magnetization_hist
self.structure = structure
self.nbands = nbands
self.nkpts = nkpts
self.mu = mu
self.HOMO = HOMO
self.LUMO = LUMO
self.phonons = phonons
self.pseudopots = pseudopots
self.lowdin = lowdin
if phonons['real'] is not None and len(phonons['real']) > 0:
self.thermal_props = ThermalProperties(np.array([phonons['real']]) * Hartree2eV)
@property
def energy(self) -> float:
if 'G' in self.energy_ionic_hist.keys():
return self.energy_ionic_hist['G'][-1]
else:
return self.energy_ionic_hist['F'][-1]
@property
def nisteps(self) -> int:
return len(self.energy_ionic_hist['F'])
@property
def nelec(self) -> float:
return self.nelec_hist[-1]
@property
def nelec_pzc(self) -> int:
return np.sum([self.structure.natoms_by_type[key] * self.pseudopots[key] for key in self.pseudopots.keys()])
@property
def magnetization_abs(self) -> float:
if self.magnetization_hist is None:
raise ValueError('This is a non-spin-polarized calculation')
else:
return self.magnetization_hist[-1, 0]
@property
def magnetization_tot(self) -> float:
if self.magnetization_hist is None:
raise ValueError('This is a non-spin-polarized calculation')
else:
return self.magnetization_hist[-1, 1]
@property
def nspin(self) -> int:
if self.magnetization_hist is None:
return 1
else:
return 2
@staticmethod
def from_file(filepath: str | Path):
if isinstance(filepath, str):
filepath = Path(filepath)
# TODO: the non-Cartesian coordinates case is not implemented
file = open(filepath, 'r')
data = file.readlines()
file.close()
patterns = {'natoms': r'Initialized \d+ species with (\d+) total atoms.',
'coords': r'# Ionic positions in cartesian coordinates:',
'forces': r'# Forces in Cartesian coordinates:',
'fft_box_size': r'Chosen fftbox size, S = \[(\s+\d+\s+\d+\s+\d+\s+)\]',
'lattice': r'---------- Initializing the Grid ----------',
'nbands': r'nBands:\s+(\d+)',
'nkpts': r'Reduced to (\d+) k-points under symmetry',
'nkpts_folded': r'Folded \d+ k-points by \d+x\d+x\d+ to (\d+) k-points.',
'is_kpts_irreducable': r'No reducable k-points',
'nelec': r'nElectrons:\s+(\d+.\d+)',
'magnetization': r'magneticMoment:\s+\[\s+Abs:\s+(\d+.\d+)\s+Tot:\s+([-+]?\d*\.\d*)',
'mu': r'\s+mu\s+:\s+([-+]?\d*\.\d*)',
'mu_hist': r'mu:\s+([-+]?\d*\.\d*)',
'HOMO': r'\s+HOMO\s*:\s+([-+]?\d*\.\d*)',
'LUMO': r'\s+LUMO\s*:\s+([-+]?\d*\.\d*)',
'F': r'^\s*F\s+=\s+([-+]?\d*\.\d*)',
'muN': r'\s+muN\s+=\s+([-+]?\d*\.\d*)',
'G': r'\s+G\s+=\s+([-+]?\d*\.\d*)',
'phonon report': r'(\d+) imaginary modes, (\d+) modes within cutoff, (\d+) real modes',
'zero mode': r'Zero mode \d+:',
'imaginary mode': r'Imaginary mode \d+:',
'real mode': r'Real mode \d+:',
'ionic convergence': r'IonicMinimize: Converged',
'pseudopots': r'\s*Title:\s+([a-zA-Z0-9]*).',
'valence_elecs': r'(\d+) valence electrons in orbitals',
'phonon_perturbations': r'\s+Perturbation:\s+\d+\s+nStates:\s+(\d+)',
'lowdin': r'#--- Lowdin population analysis ---'}
matches = regrep(str(filepath), patterns)
F = np.array([float(i[0][0]) for i in matches['F']])
energy_ionic_hist: EnergyIonicHist = {'F': F}
if 'muN' in matches.keys():
energy_ionic_hist['muN'] = np.array([float(i[0][0]) for i in matches['muN']])
if 'G' in matches.keys():
energy_ionic_hist['G'] = np.array([float(i[0][0]) for i in matches['G']])
nelec_hist = np.array([float(i[0][0]) for i in matches['nelec']])
natoms = int(matches['natoms'][0][0][0])
nbands = int(matches['nbands'][0][0][0])
phonons = {}
if matches['phonon_perturbations']:
nstates = [int(i[0][0]) for i in matches['phonon_perturbations']]
phonons['nStates'] = np.array(nstates)
else:
phonons['nStates'] = None
if bool(matches['is_kpts_irreducable']):
nkpts = int(matches['nkpts_folded'][0][0][0])
else:
nkpts = int(matches['nkpts'][0][0][0])
if bool(matches['mu']):
mu = float(matches['mu'][0][0][0])
elif matches['mu_hist']:
mu = float(matches['mu_hist'][-1][0][0])
else:
mu = None
if bool(matches['HOMO']):
HOMO = float(matches['HOMO'][0][0][0])
else:
HOMO = None
if bool(matches['LUMO']):
LUMO = float(matches['LUMO'][0][0][0])
else:
LUMO = None
if bool(matches['magnetization']):
magnetization_hist = np.zeros((len(matches['magnetization']), 2))
for i, mag in enumerate(matches['magnetization']):
magnetization_hist[i] = [float(mag[0][0]), float(mag[0][1])]
else:
magnetization_hist = None
fft_box_size = np.array([int(i) for i in matches['fft_box_size'][0][0][0].split()])
lattice = np.zeros((3, 3))
lattice[0] = [float(i) for i in data[matches['lattice'][0][1] + 2].split()[1:4]]
lattice[1] = [float(i) for i in data[matches['lattice'][0][1] + 3].split()[1:4]]
lattice[2] = [float(i) for i in data[matches['lattice'][0][1] + 4].split()[1:4]]
lattice = lattice.T * Bohr2Angstrom
if matches['forces']:
line_numbers = [int(i[1]) + 1 for i in matches['forces']]
forces_hist = np.zeros((len(line_numbers), natoms, 3))
for i, line_number in enumerate(line_numbers):
atom_number = 0
while len(line := data[line_number + atom_number].split()) > 0:
forces_hist[i, atom_number] = [float(line[2]), float(line[3]), float(line[4])]
atom_number += 1
else:
forces_hist = None
if matches['phonon report']:
freq_report = {key: int(i) for key, i in zip(['imaginary modes', 'modes within cutoff', 'real modes'],
matches['phonon report'][0][0])}
if freq_report['modes within cutoff']:
line_numbers = [int(i[1]) + 1 for i in matches['zero mode']]
zero_mode_freq = np.zeros(freq_report['modes within cutoff'], dtype=complex)
for i, line_number in enumerate(line_numbers):
zero_mode_freq[i] = complex(data[line_number].split()[1].replace('i', 'j'))
else:
zero_mode_freq = None
if freq_report['imaginary modes']:
line_numbers = [int(i[1]) + 1 for i in matches['imaginary mode']]
imag_mode_freq = np.zeros(freq_report['imaginary modes'], dtype=complex)
for i, line_number in enumerate(line_numbers):
imag_mode_freq[i] = complex(data[line_number].split()[1].replace('i', 'j'))
else:
imag_mode_freq = None
if freq_report['real modes']:
line_numbers = [int(i[1]) + 1 for i in matches['real mode']]
real_mode_freq = np.zeros(freq_report['real modes'])
for i, line_number in enumerate(line_numbers):
real_mode_freq[i] = float(data[line_number].split()[1])
else:
real_mode_freq = None
phonons['zero'] = zero_mode_freq
phonons['imag'] = imag_mode_freq
phonons['real'] = real_mode_freq
else:
phonons['zero'] = None
phonons['imag'] = None
phonons['real'] = None
if matches['coords']:
line_numbers = [int(i[1]) + 1 for i in matches['coords']]
coords_hist = np.zeros((len(line_numbers), natoms, 3))
species = []
atom_number = 0
while len(line := data[line_numbers[0] + atom_number].split()) > 0:
species += [line[1]]
atom_number += 1
for i, line_number in enumerate(line_numbers):
atom_number = 0
while len(line := data[line_number + atom_number].split()) > 0:
coords_hist[i, atom_number] = [float(line[2]), float(line[3]), float(line[4])]
atom_number += 1
else:
matches = regrep(str(filepath), {'ions': r'ion\s+([a-zA-Z]+)\s+[-+]?\d*\.\d*',
'coords': r'ion\s+[a-zA-Z]+\s+([-+]?\d*\.\d*)\s+([-+]?\d*\.\d*)\s+([-+]?\d*\.\d*)'})
species = [i[0][0] for i in matches['ions']]
coords_hist = [[[float(i) for i in coord[0]] for coord in matches['coords']]]
coords_hist = np.array(coords_hist)
if bool(matches['lowdin']):
lowdin = {}
i = matches['lowdin'][-1][1] + 1
while (line := data[i]) != '\n':
line = line.split()
lowdin[line[2]] = [float(i) for i in line[3:]]
if bool(matches['magnetization']):
i += 2
else:
i += 1
else:
lowdin = None
structure = Structure(lattice, species, coords_hist[-1] * Bohr2Angstrom, coords_are_cartesian=True)
pseudopots = {i[0][0]: int(j[0][0]) for i, j in zip(matches['pseudopots'], matches['valence_elecs'])}
return Output(fft_box_size, energy_ionic_hist, coords_hist, forces_hist, nelec_hist, magnetization_hist,
structure, nbands, nkpts, mu, HOMO, LUMO, phonons, pseudopots, lowdin)
def get_xdatcar(self) -> vasp.Xdatcar:
transform = np.linalg.inv(self.structure.lattice)
return vasp.Xdatcar(structure=self.structure,
trajectory=np.matmul(self.coords_hist * Bohr2Angstrom, transform))
def get_poscar(self) -> vasp.Poscar:
structure = copy.copy(self.structure)
structure.coords = self.coords_hist[0] * Bohr2Angstrom
return vasp.Poscar(structure=structure)
def get_contcar(self) -> vasp.Poscar:
return vasp.Poscar(structure=self.structure)
def get_ionpos(self, nstep=-1) -> Ionpos:
return Ionpos(self.structure.species, self.coords_hist[nstep])
def get_lattice(self) -> Lattice:
return Lattice(self.structure.lattice * Angstrom2Bohr)
def mod_phonon_zero2real(self, n_leave: int = 0) -> None:
if self.phonons['zero'] is not None:
mask_real = self.phonons['zero'].imag == 0
mask_complex = np.invert(mask_real)
n_real = np.sum(mask_real)
n_imag = np.sum(mask_complex)
n_zero = len(self.phonons['zero'])
if n_zero < n_leave:
print(colored(f'There are only {n_zero} zero modes, however you set {n_leave=}',
color='red', attrs=['bold']))
elif n_zero > n_leave:
if n_leave > n_imag:
n_transfer = n_real - (n_leave - n_imag)
else:
n_transfer = np.sum(mask_real)
mods_for_transfer = None
if n_zero - n_transfer > n_leave:
print(colored('Cannot leave', color='red', attrs=['bold']),
n_leave,
colored('modes, because there are', color='red', attrs=['bold']),
n_imag,
colored('imaginary modes', color='red', attrs=['bold']))
print(colored('The following values can not be converted to real:', color='red', attrs=['bold']),
self.phonons['zero'][mask_complex])
if np.any(mask_real):
mods_for_transfer = np.sort(self.phonons['zero'][mask_real].real)
print(colored('The following values will be converted to real:', color='red', attrs=['bold']),
self.phonons['zero'][mask_real])
else:
mods_for_transfer = np.sort(self.phonons['zero'][mask_real].real)[-n_transfer:]
if mods_for_transfer is not None:
self.phonons['real'] = np.hstack((mods_for_transfer, self.phonons['real']))
del_indices = []
for mode in mods_for_transfer:
del_indices.append(np.where(self.phonons['zero'] == mode)[0][0])
self.phonons['zero'] = np.delete(self.phonons['zero'], del_indices)
else:
print(colored('There are no zero phonons', color='green', attrs=['bold']))
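# Usage sketch (not part of the original file; the file name is an assumption):
#   out = Output.from_file('output.out')
#   print(out.energy, out.nelec, out.mu)       # final energy (Ha), electron count, chemical potential
#   out.get_contcar().to_file('CONTCAR')       # export the final geometry in VASP format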
class EBS_data:
@staticmethod
def from_file(filepath: str | Path,
output: Output) -> NDArray[Shape['Nspin, Nkpts, Nbands'], Number]:
if isinstance(filepath, str):
filepath = Path(filepath)
data = np.fromfile(filepath, dtype=np.float64)
if len(data) % (output.nkpts * output.nbands) != 0:
raise ValueError(
f'Number of eigenvalues should be equal to nspin * nkpts * nbands, but now {output.nkpts=},'
f'{output.nbands=}, and data has {len(data)} values')
nspin = len(data) // (output.nkpts * output.nbands)
data = data.reshape(nspin, output.nkpts, output.nbands)
return data
class Eigenvals(EBS_data):
def __init__(self,
eigenvalues: NDArray[Shape['Nspin, Nkpts, Nbands'], Number],
units: Literal['eV', 'Hartree']):
self.eigenvalues = eigenvalues
self.units = units
@staticmethod
def from_file(filepath: str | Path,
output: Output) -> 'Eigenvals':
if isinstance(filepath, str):
filepath = Path(filepath)
eigenvalues = super(Eigenvals, Eigenvals).from_file(filepath, output)
return Eigenvals(eigenvalues, 'Hartree')
def mod_to_eV(self):
if self.units == 'eV':
print('Units are already eV')
else:
self.eigenvalues *= Hartree2eV
self.units = 'eV'
def mod_to_Ha(self):
if self.units == 'Hartree':
print('Units are already Hartree')
else:
self.eigenvalues *= eV2Hartree
self.units = 'Hartree'
class Fillings(EBS_data):
def __init__(self,
occupations: np.ndarray):
self.occupations = occupations
@staticmethod
def from_file(filepath: str | Path,
output: Output) -> 'Fillings':
if isinstance(filepath, str):
filepath = Path(filepath)
occupations = super(Fillings, Fillings).from_file(filepath, output)
return Fillings(occupations)
class VolumetricData:
def __init__(self,
data: np.ndarray,
structure: Structure):
self.data = data
self.structure = structure
def __add__(self, other):
assert isinstance(other, VolumetricData), 'Other object must belong to VolumetricData class'
assert self.data.shape == other.data.shape, f'Shapes of two data arrays must be the same but they are ' \
f'{self.data.shape} and {other.data.shape}'
if self.structure != other.structure:
warnings.warn('Two VolumetricData instances contain different Structures. '
'The Structure will be taken from the 2nd (other) instance. '
'Hope you know what you are doing')
return VolumetricData(self.data + other.data, other.structure)
def __sub__(self, other):
assert isinstance(other, VolumetricData), 'Other object must belong to VolumetricData class'
assert self.data.shape == other.data.shape, f'Shapes of two data arrays must be the same but they are ' \
f'{self.data.shape} and {other.data.shape}'
if self.structure != other.structure:
warnings.warn('Two VolumetricData instances contain different Structures. '
'The Structure will be taken from the 2nd (other) instance. '
'Hope you know what you are doing')
return VolumetricData(self.data - other.data, other.structure)
@staticmethod
def from_file(filepath: str | Path,
fft_box_size: NDArray[Shape['3'], Number],
structure: Structure):
if isinstance(filepath, str):
filepath = Path(filepath)
data = np.fromfile(filepath, dtype=np.float64)
data = data.reshape(fft_box_size)
return VolumetricData(data, structure)
def convert_to_cube(self) -> Cube:
return Cube(self.data, self.structure, np.zeros(3))
class kPts:
def __init__(self,
weights: NDArray[Shape['Nkpts'], Number]):
self.weights = weights
@staticmethod
def from_file(filepath: str | Path):
if isinstance(filepath, str):
filepath = Path(filepath)
file = open(filepath, 'r')
data = file.readlines()
file.close()
weights = []
if 'spin' in data[0].split():
for line in data[:int(len(data) / 2)]:
weights.append(float(line.split()[6]))
weights = np.array(weights)
return kPts(weights)
else:
for line in data:
weights.append(float(line.split()[6]))
weights = np.array(weights) / 2
return kPts(weights)
class DOS(EBS):
@staticmethod
def from_folder(folderpath: str | Path,
output_name: str = 'output.out',
jdft_prefix='jdft',
units: Literal['eV', 'Ha'] = 'eV'):
if isinstance(folderpath, str):
folderpath = Path(folderpath)
out = Output.from_file(folderpath / output_name)
eigs = Eigenvals.from_file(folderpath / f'{jdft_prefix}.eigenvals', output=out)
fills = Fillings.from_file(folderpath / f'{jdft_prefix}.fillings', output=out)
kpts = kPts.from_file(folderpath / f'{jdft_prefix}.kPts')
if units == 'eV':
return DOS(eigenvalues=eigs.eigenvalues * Hartree2eV,
weights=kpts.weights,
efermi=out.mu * Hartree2eV,
occupations=fills.occupations)
elif units == 'Ha':
return DOS(eigenvalues=eigs.eigenvalues,
weights=kpts.weights,
efermi=out.mu,
occupations=fills.occupations)
else:
raise ValueError(f'units can be "eV" or "Ha", however you entered "{units}"')
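# Usage sketch (not part of the original file; the folder path is an assumption). Expects
# output.out, jdft.eigenvals, jdft.fillings and jdft.kPts in the given folder:
#   dos = DOS.from_folder('path/to/calc', units='eV')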
class BandProjections:
def __init__(self,
proj_coeffs: NDArray[Shape['Nspin, Nkpts, Nbands, Norbs'], Number],
weights: NDArray[Shape['Nkpts'], Number],
species: list[str],
norbs_per_atomtype: dict,
orbs_names: list[str],
orbs_data: list[dict]):
self.proj_coeffs = proj_coeffs
self.weights = weights
self.species = species
self.norbs_per_atomtype = norbs_per_atomtype
self.orbs_names = orbs_names
self.orbs_data = orbs_data
self.eigenvalues = None
@property
def nspin(self):
return self.proj_coeffs.shape[0]
@property
def nkpts(self):
return self.proj_coeffs.shape[1]
@property
def nbands(self):
return self.proj_coeffs.shape[2]
@property
def norbs(self):
return self.proj_coeffs.shape[3]
@staticmethod
def from_file(filepath: str | Path):
if isinstance(filepath, str):
filepath = Path(filepath)
file = open(filepath, 'r')
data = file.readlines()
file.close()
patterns = {'x': r'#\s+\d+'}
matches = regrep(str(filepath), patterns)
nstates = int(data[0].split()[0])
nbands = int(data[0].split()[2])
norbs = int(data[0].split()[4])
if 'spin' in data[int(matches['x'][0][1])]:
nspin = 2
else:
nspin = 1
nkpts = int(nstates / nspin)
proj_coeffs = np.zeros((nspin, nkpts, nbands, norbs))
weights = np.zeros(nstates)
start_lines = []
for i, match in enumerate(matches['x']):
start_lines.append(int(match[1]))
weights[i] = float(re.sub(r'[^0-9.]', '', data[int(match[1])].split()[7]))
if nspin == 2 and not np.array_equal(weights[:len(weights) // 2], weights[len(weights) // 2:]):
raise ValueError(f'Kpts weights can not be correctly split {weights=}')
if nspin == 2:
weights = weights[:len(weights) // 2]
species = []
norbs_per_atomtype = {}
orbs_names = []
orbs_data = []
idx_atom = -1
for iline in range(2, start_lines[0]):
line = data[iline].split()
atomtype = line[0]
natoms_per_atomtype = int(line[1])
species += [atomtype] * natoms_per_atomtype
norbs_per_atomtype[line[0]] = int(line[2])
l_max = int(line[3])
nshalls_per_l = []
for i in range(l_max + 1):
nshalls_per_l.append(int(line[4 + i]))
for i in range(natoms_per_atomtype):
idx_atom += 1
for l, n_max in zip(range(l_max + 1), nshalls_per_l):
for n in range(n_max):
if l == 0:
orbs_names.append(f'{idx_atom} {atomtype} s {n + 1}({n_max})')
orbs_data.append({'atom_type': atomtype,
'atom_index': idx_atom,
'l': l,
'm': 0,
'orb_name': 's'})
elif l == 1:
for m, m_name in zip([-1, 0, 1], ['p_x', 'p_y', 'p_z']):
orbs_names.append(f'{idx_atom} {atomtype} {m_name} {n + 1}({n_max})')
orbs_data.append({'atom_type': atomtype,
'atom_index': idx_atom,
'l': l,
'm': m,
'orb_name': m_name})
elif l == 2:
for m, m_name in zip([-2, -1, 0, 1, 2], ['d_xy', 'd_yz', 'd_z^2', 'd_xz', 'd_x^2-y^2']):
orbs_names.append(f'{idx_atom} {atomtype} {m_name} {n + 1}({n_max})')
orbs_data.append({'atom_type': atomtype,
'atom_index': idx_atom,
'l': l,
'm': m,
'orb_name': m_name})
elif l > 2:
raise NotImplementedError('Only s, p and d orbitals are currently supported')
ikpt_major = -1
ikpt_minor = -1
for istate, (start, stop) in enumerate(zip(start_lines[:-1], start_lines[1:])):
if nspin == 2:
if data[start].split()[9] == '+1;':
ispin = 0
ikpt_major += 1
ikpt = ikpt_major
elif data[start].split()[9] == '-1;':
ispin = 1
ikpt_minor += 1
ikpt = ikpt_minor
else:
raise ValueError(f'Can\'t determine spin in string {data[start].split()}')
else:
ispin = 0
ikpt = istate
for iband, line in enumerate(range(start + 1, stop)):
proj_coeffs[ispin, ikpt, iband] = [float(k) for k in data[line].split()]
return BandProjections(proj_coeffs, weights / 2, species, norbs_per_atomtype, orbs_names, orbs_data)
def get_PDOS(self,
atom_numbers: list[int] | int,
eigenvals: Eigenvals,
get_orbs_names: bool = False,
specific_l: int = None,
dE: float = 0.01,
emin: float = None,
emax: float = None,
zero_at_fermi: bool = False,
sigma: float = 0.02,
efermi: float = None) -> Union[tuple[NDArray[Shape['Ngrid'], Number],
NDArray[Shape['Nspin, Norbs_selected, Ngrid'], Number]],
tuple[NDArray[Shape['Ngrid'], Number],
NDArray[Shape['Nspin, Norbs_selected, Ngrid'], Number],
list[str]]]:
self.eigenvalues = eigenvals.eigenvalues
if isinstance(atom_numbers, int):
atom_numbers = [atom_numbers]
if zero_at_fermi is True and efermi is None:
raise ValueError('You cannot set zero_at_fermi=True without specifying an efermi value')
if emin is None:
emin = np.min(self.eigenvalues) - 1
if emax is None:
emax = np.max(self.eigenvalues) + 1
E_arr = np.arange(emin, emax, dE)
ngrid = E_arr.shape[0]
idxs = []
for atom in atom_numbers:
start = sum([self.norbs_per_atomtype[i] for i in self.species[:atom]])
for i in range(self.norbs_per_atomtype[self.species[atom]]):
idxs.append(start + i)
if specific_l is not None:
idxs = [idx for idx in idxs if self.orbs_data[idx]['l'] == specific_l]
proj_coeffs_weighted = self.proj_coeffs[:, :, :, idxs]
for spin in range(self.nspin):
for i, weight_kpt in enumerate(self.weights):
proj_coeffs_weighted[spin, i] *= weight_kpt
W_arr = np.moveaxis(proj_coeffs_weighted, [1, 2, 3], [2, 3, 1])
G_arr = EBS.gaussian_smearing(E_arr, self.eigenvalues, sigma)
PDOS_arr = np.zeros((self.nspin, len(idxs), ngrid))
for spin in range(self.nspin):
for idx in range(len(idxs)):
PDOS_arr[spin, idx] = np.sum(G_arr[spin, :, :, :] * W_arr[spin, idx, :, :, None],
axis=(0, 1))
if self.nspin == 1:
PDOS_arr *= 2
if get_orbs_names:
if zero_at_fermi:
return E_arr - efermi, PDOS_arr, [self.orbs_names[i] for i in idxs]
else:
return E_arr, PDOS_arr, [self.orbs_names[i] for i in idxs]
else:
if zero_at_fermi:
return E_arr - efermi, PDOS_arr
else:
return E_arr, PDOS_arr
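# Usage sketch (not part of the original file; the file names and the atom index are assumptions):
#   out = Output.from_file('output.out')
#   eigs = Eigenvals.from_file('jdft.eigenvals', output=out)
#   bp = BandProjections.from_file('jdft.bandProjections')
#   E, pdos, names = bp.get_PDOS(atom_numbers=0, eigenvals=eigs, get_orbs_names=True)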
def VVSHE_2_mu_Ha(V):
V_ref = 4.66
return - (V_ref + V) * eV2Hartree
def mu_Ha_2_VVSHE(mu):
V_ref = 4.66
return - (mu * Hartree2eV + V_ref)
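# Usage sketch (not part of the original file): convert an electrode potential of 0 V vs SHE into
# the electron chemical potential in Hartree, using the V_ref = 4.66 eV hard-coded above.
#   mu = VVSHE_2_mu_Ha(0.0)    # -4.66 eV, i.e. approximately -0.1713 Ha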

View File

@@ -0,0 +1,117 @@
from monty.re import regrep
import matplotlib.pyplot as plt
from pathlib import Path
from echem.io_data.jdftx import Output
from echem.core.constants import Hartree2eV
import numpy as np
def get_energies_from_logs(folderpath, plot=False, dpi=200):
patterns = {'en': r'(\d+).(\d+)\s+Current Energy:\s+(.\d+\.\d+)', 'iter': r'Now starting iteration (\d+) on\s+\[(.+)\]'}
NEBlogpath = Path(folderpath) / 'logfile_NEB.log'
pylogpath = Path(folderpath) / 'py.log'
matches_neb = regrep(str(NEBlogpath), patterns)
matches_py = regrep(str(pylogpath), patterns)
iterations_number = len(matches_py['iter'])
energies = []
n_images_list = []
for i in range(iterations_number):
energies.append([])
images_list = matches_py['iter'][i][0][1].split(', ')
energies[i] = {key: [] for key in images_list}
n_images = len(images_list)
n_images_list.append(n_images)
for i in range(len(matches_neb['en'])):
iteration = int(matches_neb['en'][i][0][0])
image = matches_neb['en'][i][0][1]
energies[iteration - 1][image].append(float(matches_neb['en'][i][0][2]))
if plot:
max_i = 0
for i in range(len(energies)):
plt.figure(dpi=dpi)
barrier = []
all_images = []
for image in energies[i].keys():
if int(image) > max_i:
max_i = int(image)
plt.scatter([int(image) for _ in range(len(energies[i][image]))], energies[i][image], c=f'C{int(image)}')
if len(energies[i][image]) != 0:
plt.scatter(int(image), energies[i][image][-1], c=f'C{int(image)}')
barrier.append(energies[i][image][-1])
all_images.append(int(image))
plt.plot(all_images, barrier, c='black')
return plt, energies
else:
return energies
def get_energies_from_outs(folderpath, opt_history=False, plot=False, dpi=200):
folderpath = Path(folderpath) / 'iterations'
neb_barriers_hist = []
neb_barriers = []
for iter, iter_path in enumerate(folderpath.glob('iter_*')):
neb_barriers.append([])
neb_barriers_hist.append([])
for f_path in iter_path.glob('[0-9]'):
out = Output.from_file(f_path / 'output.out')
if opt_history:
neb_barriers_hist[iter].append(out.energy_ionic_hist['G'] * Hartree2eV)
neb_barriers[iter].append(out.energy_ionic_hist['G'][-1] * Hartree2eV)
if plot:
if opt_history:
for i, barrier in enumerate(neb_barriers_hist):
plt.figure(dpi=dpi)
plt.title(f'Iteration {i}')
for i, traj in enumerate(barrier):
plt.plot(traj, label=i)
plt.legend(frameon=False)
plt.figure(dpi=dpi)
for i, barrier in enumerate(neb_barriers):
plt.plot(barrier, label=i)
plt.legend(frameon=False)
return plt, neb_barriers, neb_barriers_hist
else:
plt.figure(dpi=dpi)
for i, barrier in enumerate(neb_barriers):
plt.plot(barrier, label=i)
plt.legend(frameon=False)
return plt, neb_barriers
else:
return neb_barriers
def get_energies_from_pylog(filepath, plot=False, dpi=200):
energies = []
with open(filepath) as f:
data = f.readlines()
for line in data:
if 'Energies after iteration' in line:
energies.append(list(map(float, line.strip().split('[')[1][:-1].split(', '))))
if plot:
plt.figure(dpi=dpi)
for i, e in enumerate(energies):
plt.plot(e, label=i)
plt.legend(frameon=False)
return plt, energies
else:
return energies
def get_energies_from_NEBlog(folderpath, plot=False, dpi=200):
patterns = {'en': r'(\d+)\s+Current Energy:\s+(.\d+\.\d+)',
'images': r'Successfully initialized JDFTx calculator(.+)/(\d+)'}
NEBlogpath = Path(folderpath) / 'logfile_NEB.log'
matches_neb = regrep(str(NEBlogpath), patterns)
nimages = len(matches_neb['images'])
energies = [[] for i in range(nimages)]
for i in range(len(matches_neb['en'])):
image = int(matches_neb['en'][i][0][0])
energies[image-1].append(float(matches_neb['en'][i][0][1]))
if plot:
plt.figure(dpi=dpi)
barrier = []
all_images = []
for image in range(len(energies)):
plt.scatter([image for _ in range(len(energies[image]))], energies[image], c=f'C{image}')
barrier.append(energies[image][-1])
all_images.append(int(image))
plt.plot(all_images, barrier, c='black')
return plt, energies
else:
return energies
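# Usage sketch (not part of the original file; the folder path is an assumption):
#   plt, energies = get_energies_from_logs('path/to/neb_run', plot=True)
#   plt.show()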

View File

@@ -0,0 +1,171 @@
import numpy as np
import pandas as pd
import re
from monty.re import regrep
from tqdm import tqdm
from .universal import Xyz
from nptyping import NDArray, Shape, Number
class SCFLog:
""""""
def __init__(self, eigenvalues=None, occupation=None, mol_orbs=None):
""""""
self.eigenvalues = eigenvalues
self.occupations = occupation
self.mol_orbs = mol_orbs
@property
def natoms(self):
if self.mol_orbs is not None:
return np.max(self.mol_orbs[0]['atom_ids']) + 1
else:
raise ValueError('natoms can be calculated only if mol_orbs has been read')
@property
def nbands(self):
if self.eigenvalues is not None:
return len(self.eigenvalues[0])
elif self.mol_orbs is not None:
return len(self.mol_orbs[0].columns) - 3
else:
raise ValueError('nbands can be calculated only if eigenvalues or mol_orbs have been read')
@property
def nsteps(self):
if self.eigenvalues is not None:
return len(self.eigenvalues)
elif self.mol_orbs is not None:
return len(self.mol_orbs)
else:
raise ValueError('nsteps can be calculated only if eigenvalues or mol_orbs have been read')
@staticmethod
def from_file(filepath):
file = open(filepath, 'r')
data = file.readlines()
file.close()
patterns = {'eigs': 'ORBITAL ENERGIES',
'mos': 'MOLECULAR ORBITALS'}
matches = regrep(filepath, patterns)
occs = []
eigs = []
for match in tqdm(matches['eigs'], desc='Eigenvalues', total=len(matches['eigs'])):
eigs_tmp = []
occs_tmp = []
i = match[1] + 4
while data[i] != '\n' and data[i] != '------------------\n':
line = data[i].split()
occs_tmp.append(float(line[1]))
eigs_tmp.append(float(line[3]))
i += 1
occs.append(occs_tmp)
eigs.append(eigs_tmp)
mos_arr = []
for match in tqdm(matches['mos'], desc='Molecular Orbitals', total=len(matches['mos'])):
df = pd.DataFrame()
first_columns_appended = None
last_batch_added = False
i = match[1] + 2
while data[i] != '\n' and data[i] != '------------------\n':
if re.match(r'\s*\w+\s+\w+\s+([-+]?\d*\.\d*\s+)+', data[i]) is not None:
last_batch_added = False
line = data[i].split()
if first_columns_appended is False:
atom_number = re.match(r'\d+', line[0])
mos_tmp[0].append(int(atom_number[0]))
atom_symbol = line[0][len(atom_number[0]):]
mos_tmp[1].append(atom_symbol)
orbital = line[1]
mos_tmp[2].append(orbital)
for j, value in enumerate(line[2:]):
mos_tmp[3 + j].append(float(value))
i += 1
elif first_columns_appended is True:
for j, value in enumerate(line[2:]):
mos_tmp[j].append(float(value))
i += 1
else:
pass
elif re.match(r'\s*(\d+\s+)+', data[i]) is not None:
line = data[i].split()
if first_columns_appended is False:
first_columns_appended = True
last_batch_added = True
df['atom_ids'] = mos_tmp[0][1:]
df['species'] = mos_tmp[1][1:]
df['orbital'] = mos_tmp[2][1:]
for j in range(3, len(mos_tmp)):
df[mos_tmp[j][0]] = mos_tmp[j][1:]
mos_tmp = [[] for _ in range(len(line))]
for j, n_mo in enumerate(line):
mos_tmp[j].append(int(n_mo))
i += 1
elif first_columns_appended is None:
last_batch_added = True
mos_tmp = [[] for j in range(len(line) + 3)]
mos_tmp[0].append('')
mos_tmp[1].append('')
mos_tmp[2].append('')
for j, n_mo in enumerate(line):
mos_tmp[3 + j].append(int(n_mo))
first_columns_appended = False
i += 1
elif first_columns_appended is True:
last_batch_added = True
for j in range(len(mos_tmp)):
df[mos_tmp[j][0]] = mos_tmp[j][1:]
mos_tmp = [[] for _ in range(len(line))]
for j, n_mo in enumerate(line):
mos_tmp[j].append(int(n_mo))
i += 1
else:
i += 1
if not last_batch_added:
# df = pd.concat([df, pd.DataFrame(mos_tmp)], axis=1)
for j in range(len(mos_tmp)):
df[mos_tmp[j][0]] = mos_tmp[j][1:]
mos_arr.append(df)
return SCFLog(np.array(eigs), np.array(occs), mos_arr)
class XyzTrajectory:
def __init__(self,
first_xyz: Xyz,
trajectory: NDArray[Shape['Nsteps, Natoms, 3'], Number],
energies_pot: NDArray[Shape['Nsteps'], Number]):
self.first_xyz = first_xyz
self.trajectory = trajectory
self.energies_pot = energies_pot
@staticmethod
def from_file(filepath):
first_xyz = Xyz.from_file(filepath)
trajectory = []
energies_pot = []
with open(filepath, 'rt') as file:
while True:
try:
natoms = int(file.readline().strip())
except ValueError:
break
line = file.readline()
energies_pot.append(float(line.split()[5]))
#energies_pot.append(float(line.split()[8].split('=')[1]))
coords = np.zeros((natoms, 3))
for i in range(natoms):
line = file.readline().split()
coords[i] = [float(j) for j in line[1:]]
trajectory.append(coords)
return XyzTrajectory(first_xyz, np.array(trajectory), np.array(energies_pot))
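# Usage sketch (not part of the original file; the file name and the comment-line format are assumptions):
#   traj = XyzTrajectory.from_file('trajectory.xyz')
#   print(traj.trajectory.shape, traj.energies_pot[-1])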

View File

@@ -0,0 +1,129 @@
import numpy as np
from monty.re import regrep
import itertools
import typing
class QEOutput:
def __init__(self):
self.patterns = {'nkpts': r'number of k points=\s+([\d]+)',
'kpts_coord': r'k\s*=\s*(-?\d.[\d]+)\s*(-?\d.[\d]+)\s*(-?\d.[\d]+)\s*\([\d]+ PWs\)',
'occupations': 'occupation numbers',
'efermi': r'the Fermi energy is\s*(-?[\d]+.[\d]+) ev'}
self.eigenvalues = None
self.weights = None
self.occupations = None
self.efermi = None
self.nkpt = None
@staticmethod
def _GaussianSmearing(x, x0, sigma):
"""Simulate the Delta function by a Gaussian shape function"""
return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2))
def from_file(self, filepath):
matches = regrep(filepath, self.patterns)
if len(matches['kpts_coord']) != 0:
with open(filepath, 'rt') as file:
file_data = file.readlines()
eigenvalues = []
for start, end in zip(matches['kpts_coord'], matches['occupations']):
data = file_data[start[1] + 2:end[1] - 1]
data = [float(i) for i in itertools.chain.from_iterable([line.split() for line in data])]
eigenvalues.append(data)
self.eigenvalues = np.array(eigenvalues)
occupations = []
n_strings_occups = matches['occupations'][0][1] - matches['kpts_coord'][0][1] - 1
for start in matches['occupations']:
data = file_data[start[1] + 1: start[1] + n_strings_occups]
data = [float(i) for i in itertools.chain.from_iterable([line.split() for line in data])]
occupations.append(data)
self.occupations = np.array(occupations)
self.efermi = float(matches['efermi'][0][0][0])
self.nkpt = int(matches['nkpts'][0][0][0])
weights = np.zeros(self.nkpt)
for i in range(self.nkpt):
weights[i] = file_data[matches['nkpts'][0][1]+2+i].split()[-1]
self.weights = weights
def get_band_eigs(self, bands):
if type(bands) is int:
return np.array([eig for eig in self.eigenvalues[:, bands]])
elif isinstance(bands, typing.Iterable):
return np.array([[eig for eig in self.eigenvalues[:, band]] for band in bands])
else:
raise ValueError('Variable bands should be int or iterable')
def get_band_occ(self, bands):
if type(bands) is int:
return [occ for occ in self.occupations[:, bands]]
elif isinstance(bands, typing.Iterable):
return np.array([[occ for occ in self.occupations[:, band]] for band in bands])
else:
raise ValueError('Variable bands should be int or iterable')
def get_DOS(self, **kwargs):
"""Calculate Density of States based on eigenvalues and its weights
Args:
dE (float): step of energy array in function output
zero_at_fermi (bool, optional): if True Fermi energy will be equal to zero
sm_param (dict, optional): parameters for smooth DOS.
E_min (float, str): minimum value in DOS calculation. If E_min == 'min' left border of energy
is equal to the minimum eigenvalue
E_max (float, str): maximum value in DOS calculation. If E_max == 'max' right border of energy
is equal to the maximum eigenvalue
bw_method (float): The method used to calculate the estimator bandwidth. This can be 'scott',
'silverman', a scalar constant or a callable. If a scalar, this will be used directly as `kde.factor`.
If a callable, it should take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used.
nelec (int): Number of electrons in the system. DOS integral to efermi should be equal to the nelec
Returns:
E, DOS - Two 1D np.arrays that contain energy and according DOS values
"""
if 'zero_at_fermi' in kwargs:
zero_at_fermi = kwargs['zero_at_fermi']
else:
zero_at_fermi = False
if 'dE' in kwargs:
dE = kwargs['dE']
else:
dE = 0.01
if 'smearing' in kwargs:
smearing = kwargs['smearing']
else:
smearing = 'Gaussian'
if smearing == 'Gaussian':
if 'sigma' in kwargs:
sigma = kwargs['sigma']
else:
sigma = 0.02
            if 'emin' in kwargs:
                E_min = kwargs['emin']
            else:
                E_min = np.min(self.eigenvalues)
            if 'emax' in kwargs:
                E_max = kwargs['emax']
            else:
                E_max = np.max(self.eigenvalues)
        else:
            raise ValueError(f'Only Gaussian smearing is supported, but smearing={smearing} was given')
E_arr = np.arange(E_min, E_max, dE)
DOS_arr = np.zeros_like(E_arr)
for energy_kpt, weight in zip(self.eigenvalues, self.weights):
for energy in energy_kpt:
DOS_arr += weight * self._GaussianSmearing(E_arr, energy, sigma)
# 2 is not used because sum(weights) = 2
if zero_at_fermi:
return E_arr - self.efermi, DOS_arr
else:
return E_arr, DOS_arr
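if __name__ == '__main__':
    # A minimal usage sketch: build a Gaussian-smeared DOS from a Quantum ESPRESSO output file.
    # 'pw.out' is a hypothetical path; the keyword arguments are the ones read by get_DOS above.
    qe = QEOutput()
    qe.from_file('pw.out')
    E, dos = qe.get_DOS(dE=0.01, sigma=0.05, zero_at_fermi=True)
    print(f'DOS grid: {E.shape[0]} points from {E[0]:.2f} to {E[-1]:.2f} eV')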

View File

@@ -0,0 +1,394 @@
import numpy as np
from echem.core.constants import ElemNum2Name, ElemName2Num, Bohr2Angstrom, Angstrom2Bohr
from echem.core.structure import Structure
import warnings
class Cube:
def __init__(self,
data: np.ndarray,
structure: Structure,
origin: np.ndarray,
units_data: str = 'Bohr',
comment: str = None,
charges=None,
dset_ids=None):
self.volumetric_data = data
self.structure = structure
self.origin = origin
self.units_data = units_data
if comment is None:
self.comment = 'Comment is not defined\nGood luck!\n'
else:
self.comment = comment
if charges is None:
self.charges = np.zeros(structure.natoms)
else:
self.charges = charges
self.dset_ids = dset_ids
def __repr__(self):
shape = self.volumetric_data.shape
return f'{self.comment}\n' + f'NX: {shape[0]}\nNY: {shape[1]}\nNZ: {shape[2]}\n' + \
f'Origin:\n{self.origin[0]:.5f} {self.origin[1]:.5f} {self.origin[2]:.5f}\n' + \
repr(self.structure)
def __add__(self, other):
assert isinstance(other, Cube), 'Other object must belong to Cube class'
assert np.array_equal(self.origin, other.origin), 'Two Cube instances must have the same origin'
assert self.volumetric_data.shape == other.volumetric_data.shape, 'Two Cube instances must have ' \
'the same shape of volumetric_data'
if self.structure != other.structure:
warnings.warn('Two Cube instances have different structures. '
'The structure will be taken from the 1st (self) instance. '
                          'Hope you know what you are doing')
return Cube(self.volumetric_data + other.volumetric_data, self.structure, self.origin)
def __sub__(self, other):
assert isinstance(other, Cube), 'Other object must belong to Cube class'
assert np.array_equal(self.origin, other.origin), 'Two Cube instances must have the same origin'
assert self.volumetric_data.shape == other.volumetric_data.shape, 'Two Cube instances must have ' \
'the same shape of volumetric_data'
if self.structure != other.structure:
warnings.warn('\nTwo Cube instances have different structures. '
'The structure will be taken from the 1st (self) instance. '
                          'Hope you know what you are doing')
return Cube(self.volumetric_data - other.volumetric_data, self.structure, self.origin)
def __neg__(self):
return Cube(-self.volumetric_data, self.structure, self.origin)
def __mul__(self, other):
assert isinstance(other, Cube), 'Other object must belong to Cube class'
assert np.array_equal(self.origin, other.origin), 'Two Cube instances must have the same origin'
assert self.volumetric_data.shape == other.volumetric_data.shape, 'Two Cube instances must have ' \
'the same shape of volumetric_data'
if self.structure != other.structure:
warnings.warn('\nTwo Cube instances have different structures. '
'The structure will be taken from the 1st (self) instance. '
                          'Hope you know what you are doing')
return Cube(self.volumetric_data * other.volumetric_data, self.structure, self.origin)
@staticmethod
def from_file(filepath):
with open(filepath, 'rt') as file:
comment_1 = file.readline()
comment_2 = file.readline()
comment = comment_1 + comment_2
line = file.readline().split()
natoms = int(line[0])
if natoms < 0:
dset_ids_flag = True
natoms = abs(natoms)
else:
dset_ids_flag = False
origin = np.array([float(line[1]), float(line[2]), float(line[3])])
if len(line) == 4:
n_data = 1
elif len(line) == 5:
n_data = int(line[4])
line = file.readline().split()
NX = int(line[0])
xaxis = np.array([float(line[1]), float(line[2]), float(line[3])])
line = file.readline().split()
NY = int(line[0])
yaxis = np.array([float(line[1]), float(line[2]), float(line[3])])
line = file.readline().split()
NZ = int(line[0])
zaxis = np.array([float(line[1]), float(line[2]), float(line[3])])
if NX > 0 and NY > 0 and NZ > 0:
units = 'Bohr'
elif NX < 0 and NY < 0 and NZ < 0:
units = 'Angstrom'
else:
                raise ValueError('The numbers of voxels along all three axes must have the same sign (all positive for Bohr or all negative for Angstrom)')
if units == 'Angstrom':
NX, NY, NZ = -NX, -NY, -NZ
lattice = np.array([xaxis * NX, yaxis * NY, zaxis * NZ])
species = []
charges = np.zeros(natoms)
coords = np.zeros((natoms, 3))
for atom in range(natoms):
line = file.readline().split()
species += [ElemNum2Name[int(line[0])]]
charges[atom] = float(line[1])
coords[atom, :] = line[2:]
if units == 'Bohr':
lattice = Bohr2Angstrom * lattice
coords = Bohr2Angstrom * coords
origin = Bohr2Angstrom * origin
structure = Structure(lattice, species, coords, coords_are_cartesian=True)
dset_ids = None
dset_ids_processed = -1
if dset_ids_flag is True:
dset_ids = []
line = file.readline().split()
n_data = int(line[0])
if n_data < 1:
raise ValueError(f'Bad value of n_data: {n_data}')
dset_ids_processed += len(line)
dset_ids += [int(i) for i in line[1:]]
while dset_ids_processed < n_data:
line = file.readline().split()
dset_ids_processed += len(line)
dset_ids += [int(i) for i in line]
dset_ids = np.array(dset_ids)
if n_data != 1:
                raise NotImplementedError(f'The processing of cube files with more than one data value is not implemented.'
                                          f' n_data = {n_data}')
data = np.zeros((NX, NY, NZ))
indexes = np.arange(0, NX * NY * NZ)
indexes_1 = indexes // (NY * NZ)
indexes_2 = (indexes // NZ) % NY
indexes_3 = indexes % NZ
i = 0
for line in file:
for value in line.split():
data[indexes_1[i], indexes_2[i], indexes_3[i]] = float(value)
i += 1
return Cube(data, structure, origin, units, comment, charges, dset_ids)
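    # A minimal usage sketch: Cube supports element-wise arithmetic, so a charge-density
    # difference can be built directly from files. The paths are hypothetical; the cubes must
    # share the same origin and grid shape.
    #     rho_ab = Cube.from_file('AB.cube')
    #     rho_a = Cube.from_file('A.cube')
    #     rho_b = Cube.from_file('B.cube')
    #     delta_rho = rho_ab - rho_a - rho_b
    #     delta_rho.to_file('delta_rho.cube')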
#def reduce(self, factor):
# from skimage.measure import block_reduce
# try:
# volumetric_data_reduced = block_reduce(self.volumetric_data, block_size=(factor, factor, factor), func=np.mean)
# Ns_reduced = np.shape(volumetric_data_reduced)
# except:
# raise ValueError('Try another factor value')
# return Cube(volumetric_data_reduced, self.structure, self.comment, Ns_reduced, self.charges)
def to_file(self, filepath, units='Bohr'):
if not self.structure.coords_are_cartesian:
self.structure.mod_coords_to_cartesian()
Ns = np.array(self.volumetric_data.shape)
width_Ni = len(str(np.max(Ns)))
if units == 'Angstrom':
Ns = - Ns
width_Ni += 1
width_lattice = len(str(int(np.max(self.structure.lattice)))) + 7
width_coord = len(str(int(np.max(self.structure.coords)))) + 7
elif units == 'Bohr':
lattice = self.get_voxel() * Angstrom2Bohr
coords = self.structure.coords * Angstrom2Bohr
origin = self.origin * Angstrom2Bohr
width_lattice = len(str(int(np.max(lattice)))) + 7
width_coord = len(str(int(np.max(coords)))) + 7
else:
raise ValueError(f'Irregular units flag: {units}. Units must be \'Bohr\' or \'Angstrom\'')
if np.sum(self.structure.lattice < 0):
width_lattice += 1
if np.sum(self.structure.coords < 0):
width_coord += 1
width = np.max([width_lattice, width_coord])
if self.dset_ids is not None:
natoms = - self.structure.natoms
else:
natoms = self.structure.natoms
width_natoms = len(str(natoms))
width_1_column = max(width_Ni, width_natoms)
with open(filepath, 'w') as file:
file.write(self.comment)
if units == 'Angstrom':
file.write(f' {natoms:{width_1_column}} {self.origin[0]:{width}.6f} '
f' {self.origin[1]:{width}.6f} {self.origin[2]:{width}.6f}\n')
for N_i, lattice_vector in zip(Ns, self.get_voxel()):
file.write(f' {N_i:{width_1_column}} {lattice_vector[0]:{width}.6f} '
f' {lattice_vector[1]:{width}.6f} {lattice_vector[2]:{width}.6f}\n')
for atom_name, charge, coord in zip(self.structure.species, self.charges, self.structure.coords):
file.write(
f' {ElemName2Num[atom_name]:{width_1_column}} {charge:{width}.6f} '
f' {coord[0]:{width}.6f} {coord[1]:{width}.6f} {coord[2]:{width}.6f}\n')
elif units == 'Bohr':
file.write(f' {natoms:{width_1_column}} {origin[0]:{width}.6f} '
f' {origin[1]:{width}.6f} {origin[2]:{width}.6f}\n')
for N_i, lattice_vector in zip(Ns, lattice):
file.write(f' {N_i:{width_1_column}} {lattice_vector[0]:{width}.6f} '
f' {lattice_vector[1]:{width}.6f} {lattice_vector[2]:{width}.6f}\n')
for atom_name, charge, coord in zip(self.structure.species, self.charges, coords):
file.write(
f' {ElemName2Num[atom_name]:{width_1_column}} {charge:{width}.6f} '
f' {coord[0]:{width}.6f} {coord[1]:{width}.6f} {coord[2]:{width}.6f}\n')
else:
raise ValueError(f'Irregular units flag: {units}. Units must be \'Bohr\' or \'Angstrom\'')
if self.dset_ids is not None:
m = len(self.dset_ids)
file.write(f' {m:{width_1_column}}' + ' ')
for dset_id in self.dset_ids:
file.write(str(dset_id) + ' ')
file.write('\n')
for i in range(abs(Ns[0])):
for j in range(abs(Ns[1])):
for k in range(abs(Ns[2])):
file.write(str(' %.5E' % self.volumetric_data[i][j][k]))
if k % 6 == 5:
file.write('\n')
file.write('\n')
def mod_to_zero_origin(self):
self.structure.coords -= self.origin
self.origin = np.zeros(3)
def get_average_along_axis(self, axis):
"""
Gets average value along axis
Args:
axis (int):
if 0 than average along x wil be calculated
if 1 along y
if 2 along z
Returns:
np.array of average value along selected axis
"""
if axis == 2:
return np.mean(self.volumetric_data, (0, 1))
elif axis == 1:
return np.mean(self.volumetric_data, (0, 2))
elif axis == 0:
return np.mean(self.volumetric_data, (1, 2))
else:
raise ValueError('axis can be only 0, 1 or 2')
def get_average_along_axis_max(self, axis: int, scale=None):
"""Calculate the vacuum level (the maximum planar average value along selected axis)
Args:
axis (int): The axis number along which the planar average is calculated. The first axis is 0
scale (float): The value that is multiplying by the result. It's used for converting between
different units
Returns:
(float): The vacuum level multiplied by scale factor
"""
avr = self.get_average_along_axis(axis)
if scale is None:
return np.max(avr)
else:
return scale * np.max(avr)
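    # A minimal usage sketch: for a slab with vacuum along z, the maximum planar average of the
    # electrostatic potential gives the vacuum level; a rough work function estimate is then
    # E_vac - E_fermi. 'potential.cube' and efermi are hypothetical inputs.
    #     pot = Cube.from_file('potential.cube')
    #     e_vac = pot.get_average_along_axis_max(axis=2)
    #     work_function = e_vac - efermi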
def get_voxel(self, units='Angstrom'):
NX, NY, NZ = self.volumetric_data.shape
voxel = self.structure.lattice.copy()
voxel[0] /= NX
voxel[1] /= NY
voxel[2] /= NZ
if units == 'Angstrom':
return voxel
elif units == 'Bohr':
return voxel * Angstrom2Bohr
else:
raise ValueError('units can be \'Angstrom\' or \'Bohr\'')
def get_integrated_number(self):
if self.units_data == 'Bohr':
voxel_volume = np.linalg.det(self.get_voxel(units='Bohr'))
return voxel_volume * np.sum(self.volumetric_data)
else:
            raise NotImplementedError(f'Integration is only implemented for units_data == "Bohr", but units_data = {self.units_data}')
def assign_top_n_data_to_atoms(self, n_top, r):
"""Assign top n abs of volumetric data to atoms. Might be used to assign electron density to atoms.
Args:
n_top (int): Number of voxels that will be analysed
r (float): Radius. A voxel is considered belonging to atom is the distance between the voxel center and ]
atom is less than r.
Returns:
(np.ndarray): Array of boolean values. I-th raw represents i-th atom, j-th column represents j-th voxel
"""
sorted_indices = np.array(np.unravel_index(np.argsort(-np.abs(self.volumetric_data), axis=None),
self.volumetric_data.shape)).T
translation_vector = np.sum(self.structure.lattice, axis=0)
voxels_centres = sorted_indices[:n_top, :] * translation_vector + translation_vector / 2 + self.origin
atom_indices = list(range(self.structure.natoms))
if self.structure.natoms == 1:
return np.linalg.norm(voxels_centres - self.structure.coords[0], axis=-1) < r
else:
return np.linalg.norm(np.broadcast_to(voxels_centres, (self.structure.natoms,) + voxels_centres.shape) -
np.expand_dims(self.structure.coords[atom_indices], axis=1), axis=-1) < r
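    # A minimal usage sketch: count how many of the 1000 strongest voxels of a density cube lie
    # within 1.5 Angstrom of each atom. 'density.cube' is a hypothetical path.
    #     cube = Cube.from_file('density.cube')
    #     mask = cube.assign_top_n_data_to_atoms(n_top=1000, r=1.5)
    #     voxels_per_atom = mask.sum(axis=-1)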
class Xyz:
def __init__(self, structure, comment):
self.structure = structure
self.comment = comment
@staticmethod
def from_file(filepath):
with open(filepath, 'rt') as file:
natoms = int(file.readline().strip())
comment = file.readline()
coords = np.zeros((natoms, 3))
species = []
for i in range(natoms):
line = file.readline().split()
species.append(line[0])
coords[i] = [float(j) for j in line[1:]]
struct = Structure(np.zeros((3, 3)), species, coords, coords_are_cartesian=True)
return Xyz(struct, comment)
class XyzTrajectory:
def __init__(self, first_xyz, trajectory):
self.first_xyz = first_xyz
self.trajectory = trajectory
@staticmethod
def from_file(filepath):
first_xyz = Xyz.from_file(filepath)
trajectory = []
with open(filepath, 'rt') as file:
while True:
try:
natoms = int(file.readline().strip())
                except ValueError:  # int('') raised at end of file
break
file.readline()
coords = np.zeros((natoms, 3))
for i in range(natoms):
line = file.readline().split()
coords[i] = [float(j) for j in line[1:]]
trajectory.append(coords)
return XyzTrajectory(first_xyz, np.array(trajectory))

View File

@@ -0,0 +1,675 @@
from __future__ import annotations
import numpy as np
from typing import Union, List, Iterable
from monty.re import regrep
from echem.core.structure import Structure
from ..io_data.universal import Cube
from echem.core.electronic_structure import EBS
from echem.core.ionic_dynamics import IonicDynamics
from echem.core.constants import Angstrom2Bohr
from . import jdftx
from pymatgen.io.vasp import Procar as Procar_pmg
from nptyping import NDArray, Shape, Number
from pathlib import Path
import warnings
class Poscar:
"""Class that reads VASP POSCAR files"""
def __init__(self,
structure: Structure,
comment: str = None,
sdynamics_data: list = None):
"""
Create a Poscar instance
Args:
structure (Structure class): a base class that contains lattice, coords and species information
comment (str): a VASP comment
sdynamics_data (list, 2D np.array): data about selective dynamics for each atom. [['T', 'T', 'F'],
['F', 'F', 'F'],...]
"""
self.structure = structure
self.comment = comment
self.sdynamics_data = sdynamics_data
def __repr__(self):
return f'{self.comment}\n' + repr(self.structure)
@staticmethod
def from_file(filepath: str | Path):
"""
Static method to read a POSCAR file
Args:
filepath: path to the POSCAR file
Returns:
Poscar class object
"""
if isinstance(filepath, str):
filepath = Path(filepath)
file = open(filepath, 'r')
data = file.readlines()
file.close()
comment = data[0].strip()
scale = float(data[1])
lattice = np.array([[float(i) for i in line.split()] for line in data[2:5]])
if scale < 0:
# In VASP, a negative scale factor is treated as a volume.
# We need to translate this to a proper lattice vector scaling.
vol = abs(np.linalg.det(lattice))
lattice *= (-scale / vol) ** (1 / 3)
else:
lattice *= scale
name_species = data[5].split()
num_species = [int(i) for i in data[6].split()]
species = []
for name, num in zip(name_species, num_species):
species += [name]*num
sdynamics_is_used = False
start_atoms = 8
if data[7][0] in 'sS':
sdynamics_is_used = True
start_atoms = 9
coords_are_cartesian = False
if sdynamics_is_used:
if data[8][0] in 'cCkK':
coords_are_cartesian = True
else:
if data[7][0] in 'cCkK':
coords_are_cartesian = True
coords = []
coords_scale = scale if coords_are_cartesian else 1
sdynamics_data = list() if sdynamics_is_used else None
for i in range(start_atoms, start_atoms + np.sum(num_species), 1):
line = data[i].split()
coords.append([float(j) * coords_scale for j in line[:3]])
if sdynamics_is_used:
for i in range(start_atoms, start_atoms + np.sum(num_species), 1):
line = data[i].split()
sdynamics_data.append([j for j in line[3:6]])
struct = Structure(lattice, species, coords, coords_are_cartesian)
if sdynamics_is_used:
return Poscar(struct, comment, sdynamics_data)
else:
return Poscar(struct, comment)
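    # A minimal usage sketch: read a POSCAR, freeze the first two atoms via selective dynamics
    # and write the result back. 'POSCAR' and 'POSCAR_fixed' are hypothetical paths.
    #     pos = Poscar.from_file('POSCAR')
    #     pos.mod_change_atoms([0, 1], new_sdynamics_data=[['F', 'F', 'F'], ['F', 'F', 'F']])
    #     pos.to_file('POSCAR_fixed')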
def to_file(self, filepath: str | Path):
if isinstance(filepath, str):
filepath = Path(filepath)
file = open(filepath, 'w')
file.write(f'{self.comment}\n')
file.write('1\n')
for vector in self.structure.lattice:
file.write(f' {vector[0]} {vector[1]} {vector[2]}\n')
species = np.array(self.structure.species)
sorted_order = np.argsort(species, kind='stable')
unique, counts = np.unique(species, return_counts=True)
line = ' '
for u in unique:
line += u + ' '
file.write(line + '\n')
line = ' '
for c in counts:
line += str(c) + ' '
file.write(line + '\n')
if self.sdynamics_data is not None:
file.write('Selective dynamics\n')
if self.structure.coords_are_cartesian:
file.write('Cartesian\n')
else:
file.write('Direct\n')
if self.sdynamics_data is None:
for i in sorted_order:
atom = self.structure.coords[i]
file.write(f' {atom[0]} {atom[1]} {atom[2]}\n')
else:
for i in sorted_order:
atom = self.structure.coords[i]
sd_atom = self.sdynamics_data[i]
file.write(f' {atom[0]} {atom[1]} {atom[2]} {sd_atom[0]} {sd_atom[1]} {sd_atom[2]}\n')
file.close()
def convert(self, format):
if format == 'jdftx':
self.mod_coords_to_cartesian()
return jdftx.Ionpos(self.structure.species, self.structure.coords * Angstrom2Bohr), \
jdftx.Lattice(np.transpose(self.structure.lattice) * Angstrom2Bohr)
else:
raise ValueError('Only format = jdftx is supported')
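    # A minimal usage sketch: export a POSCAR geometry as JDFTx inputs; convert() returns an
    # Ionpos/Lattice pair in Bohr. 'POSCAR' is a hypothetical path.
    #     pos = Poscar.from_file('POSCAR')
    #     ionpos, lattice = pos.convert('jdftx')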
def mod_add_atoms(self, coords, species, sdynamics_data=None):
self.structure.mod_add_atoms(coords, species)
if sdynamics_data is not None:
if any(isinstance(el, list) for el in sdynamics_data):
for sd_atom in sdynamics_data:
self.sdynamics_data.append(sd_atom)
else:
self.sdynamics_data.append(sdynamics_data)
def mod_change_atoms(self, ids: Union[int, Iterable],
new_coords: Union[Iterable[float], Iterable[Iterable[float]]] = None,
new_species: Union[str, List[str]] = None,
new_sdynamics_data: Union[Iterable[str], Iterable[Iterable[str]]] = None):
self.structure.mod_change_atoms(ids, new_coords, new_species)
if new_sdynamics_data is not None:
if self.sdynamics_data is None:
self.sdynamics_data = [['T', 'T', 'T'] for _ in range(self.structure.natoms)]
if isinstance(ids, Iterable):
for i, new_sdata in zip(ids, new_sdynamics_data):
self.sdynamics_data[i] = new_sdata
else:
self.sdynamics_data[ids] = new_sdynamics_data
def mod_coords_to_box(self):
assert self.structure.coords_are_cartesian is False, 'This operation allowed only for NON-cartesian coords'
self.structure.coords %= 1
def mod_coords_to_direct(self):
self.structure.mod_coords_to_direct()
def mod_coords_to_cartesian(self):
self.structure.mod_coords_to_cartesian()
class Outcar(EBS, IonicDynamics):
"""Class that reads VASP OUTCAR files"""
def __init__(self,
weights: NDArray[Shape['Nkpts'], Number],
efermi_hist: NDArray[Shape['Nisteps'], Number],
eigenvalues_hist: NDArray[Shape['Nisteps, Nspin, Nkpts, Nbands'], Number],
occupations_hist: NDArray[Shape['Nisteps, Nspin, Nkpts, Nbands'], Number],
energy_hist: NDArray[Shape['Nallsteps'], Number],
energy_ionic_hist: NDArray[Shape['Nisteps'], Number],
                 forces_hist: NDArray[Shape['Nisteps, Natoms, 3'], Number]):
EBS.__init__(self, eigenvalues_hist[-1], weights, efermi_hist[-1], occupations_hist[-1])
IonicDynamics.__init__(self, forces_hist, None, None, None)
self.efermi_hist = efermi_hist
self.energy_hist = energy_hist
self.energy_ionic_hist = energy_ionic_hist
self.eigenvalues_hist = eigenvalues_hist
self.occupations_hist = occupations_hist
def __add__(self, other):
"""
Concatenates Outcar files (all histories). It is useful for ionic optimization.
If k-point meshes from two Outcars are different, weights, eigenvalues and occupations will be taken
from the 2nd (other) Outcar instance
Args:
other (Outcar class): Outcar that should be added to the current Outcar
Returns (Outcar class):
New Outcar with concatenated histories
"""
assert isinstance(other, Outcar), 'Other object must belong to Outcar class'
assert self.natoms == other.natoms, 'Number of atoms of two files must be equal'
if not np.array_equal(self.weights, other.weights):
warnings.warn('Two Outcar instances have been calculated with different k-point folding. '
'Weights, eigenvalues and occupations will be taken from the 2nd (other) instance. '
                          'Hope you know what you are doing')
return Outcar(other.weights,
np.concatenate((self.efermi_hist, other.efermi_hist)),
other.eigenvalues_hist,
other.occupations_hist,
np.concatenate((self.energy_hist, other.energy_hist)),
np.concatenate((self.energy_ionic_hist, other.energy_ionic_hist)),
np.concatenate((self.forces_hist, other.forces_hist)))
return Outcar(other.weights,
np.concatenate((self.efermi_hist, other.efermi_hist)),
np.concatenate((self.eigenvalues_hist, other.eigenvalues_hist)),
np.concatenate((self.occupations_hist, other.occupations_hist)),
np.concatenate((self.energy_hist, other.energy_hist)),
np.concatenate((self.energy_ionic_hist, other.energy_ionic_hist)),
np.concatenate((self.forces_hist, other.forces_hist)))
@property
def natoms(self):
return self.forces.shape[0]
@property
def nisteps(self):
return self.energy_ionic_hist.shape[0]
@property
def forces(self):
return self.forces_hist[-1]
@property
def energy(self):
return self.energy_ionic_hist[-1]
@staticmethod
def from_file(filepath: str | Path):
if isinstance(filepath, str):
filepath = Path(filepath)
file = open(filepath, 'r')
data = file.readlines()
file.close()
patterns = {'nkpts': r'k-points\s+NKPTS\s+=\s+(\d+)',
'nbands': r'number of bands\s+NBANDS=\s+(\d+)',
'natoms': r'NIONS\s+=\s+(\d+)',
'weights': 'Following reciprocal coordinates:',
'efermi': r'E-fermi\s:\s+([-.\d]+)',
'energy': r'free energy\s+TOTEN\s+=\s+(.\d+\.\d+)\s+eV',
'energy_ionic': r'free energy\s+TOTEN\s+=\s+(.\d+\.\d+)\s+eV',
'kpoints': r'k-point\s+(\d+)\s:\s+[-.\d]+\s+[-.\d]+\s+[-.\d]+\n',
'forces': r'\s+POSITION\s+TOTAL-FORCE',
'spin': r'spin component \d+\n'}
matches = regrep(str(filepath), patterns)
nbands = int(matches['nbands'][0][0][0])
nkpts = int(matches['nkpts'][0][0][0])
natoms = int(matches['natoms'][0][0][0])
energy_hist = np.array([float(i[0][0]) for i in matches['energy']])
energy_ionic_hist = np.array([float(i[0][0]) for i in matches['energy_ionic']])
if matches['spin']:
nspin = 2
else:
nspin = 1
if nkpts == 1:
weights = np.array([float(data[matches['weights'][0][1] + 2].split()[3])])
else:
weights = np.zeros(nkpts)
for i in range(nkpts):
weights[i] = float(data[matches['weights'][0][1] + 2 + i].split()[3])
weights /= np.sum(weights)
arr = matches['efermi']
efermi_hist = np.zeros(len(arr))
for i in range(len(arr)):
efermi_hist[i] = float(arr[i][0][0])
nisteps = len(energy_ionic_hist)
eigenvalues_hist = np.zeros((nisteps, nspin, nkpts, nbands))
occupations_hist = np.zeros((nisteps, nspin, nkpts, nbands))
each_kpoint_list = np.array([[int(j[0][0]), int(j[1])] for j in matches['kpoints']])
for step in range(nisteps):
for spin in range(nspin):
for kpoint in range(nkpts):
arr = data[each_kpoint_list[nkpts * nspin * step + nkpts * spin + kpoint, 1] + 2:
each_kpoint_list[nkpts * nspin * step + nkpts * spin + kpoint, 1] + 2 + nbands]
eigenvalues_hist[step, spin, kpoint] = [float(i.split()[1]) for i in arr]
occupations_hist[step, spin, kpoint] = [float(i.split()[2]) for i in arr]
arr = matches['forces']
forces_hist = np.zeros((nisteps, natoms, 3))
for step in range(nisteps):
for atom in range(natoms):
line = data[arr[step][1] + atom + 2:arr[step][1] + atom + 3]
line = line[0].split()
forces_hist[step, atom] = [float(line[3]), float(line[4]), float(line[5])]
return Outcar(weights, efermi_hist, eigenvalues_hist, occupations_hist,
energy_hist, energy_ionic_hist, forces_hist)
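    # A minimal usage sketch: restarted relaxations can be stitched together with '+', which
    # concatenates the ionic histories. The OUTCAR paths are hypothetical.
    #     run1 = Outcar.from_file('run1/OUTCAR')
    #     run2 = Outcar.from_file('run2/OUTCAR')
    #     full = run1 + run2
    #     print(full.nisteps, full.energy_ionic_hist[-1], np.abs(full.forces).max())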
class Wavecar:
"""Class that reads VASP WAVECAR files"""
# TODO: add useful functions for Wavecar class: plot charge density, plot real and imag parts etc.
def __init__(self, kb_array, wavefunctions, ngrid_factor):
self.kb_array = kb_array
self.wavefunctions = wavefunctions
self.ngrid_factor = ngrid_factor
@staticmethod
def from_file(filepath, kb_array, ngrid_factor=1.5):
from echem.core.vaspwfc_p3 import vaspwfc
wfc = vaspwfc(filepath)
wavefunctions = []
for kb in kb_array:
kpoint = kb[0]
band = kb[1]
wf = wfc.wfc_r(ikpt=kpoint, iband=band, ngrid=wfc._ngrid * ngrid_factor)
wavefunctions.append(wf)
return Wavecar(kb_array, wavefunctions, ngrid_factor)
class Procar:
def __init__(self, proj_koeffs, orbital_names):
self.proj_koeffs = proj_koeffs
self.eigenvalues = None
self.weights = None
self.nspin = None
self.nkpts = None
self.nbands = None
self.efermi = None
self.natoms = None
self.norbs = proj_koeffs.shape[4]
self.orbital_names = orbital_names
@staticmethod
def from_file(filepath):
procar = Procar_pmg(filepath)
spin_keys = list(procar.data.keys())
proj_koeffs = np.zeros((len(spin_keys),) + procar.data[spin_keys[0]].shape)
for i, spin_key in enumerate(spin_keys):
proj_koeffs[i] = procar.data[spin_key]
return Procar(proj_koeffs, procar.orbitals)
def get_PDOS(self, outcar: Outcar, atom_numbers, **kwargs):
self.eigenvalues = outcar.eigenvalues
self.weights = outcar.weights
self.nspin = outcar.nspin
self.nkpts = outcar.nkpts
self.nbands = outcar.nbands
self.efermi = outcar.efermi
self.natoms = outcar.natoms
if 'zero_at_fermi' in kwargs:
zero_at_fermi = kwargs['zero_at_fermi']
else:
zero_at_fermi = False
if 'dE' in kwargs:
dE = kwargs['dE']
else:
dE = 0.01
if 'smearing' in kwargs:
smearing = kwargs['smearing']
else:
smearing = 'Gaussian'
if smearing == 'Gaussian':
if 'sigma' in kwargs:
sigma = kwargs['sigma']
else:
sigma = 0.02
if 'emin' in kwargs:
E_min = kwargs['emin']
else:
E_min = np.min(self.eigenvalues)
if 'emax' in kwargs:
E_max = kwargs['emax']
else:
E_max = np.max(self.eigenvalues)
else:
raise ValueError(f'Only Gaussian smearing is supported but you used {smearing} instead')
E_arr = np.arange(E_min, E_max, dE)
ngrid = E_arr.shape[0]
proj_coeffs_weighted = self.proj_koeffs[:, :, :, atom_numbers, :]
for spin in range(self.nspin):
for i, weight_kpt in enumerate(self.weights):
proj_coeffs_weighted[spin, i] *= weight_kpt
W_arr = np.moveaxis(proj_coeffs_weighted, [2, 3, 4], [4, 2, 3])
G_arr = EBS.gaussian_smearing(E_arr, self.eigenvalues, sigma)
PDOS_arr = np.zeros((self.nspin, len(atom_numbers), self.norbs, ngrid))
for spin in range(self.nspin):
for atom in range(len(atom_numbers)):
PDOS_arr[spin, atom] = np.sum(G_arr[spin, :, None, :, :] * W_arr[spin, :, atom, :, :, None],
axis=(0, 2))
if self.nspin == 1:
PDOS_arr *= 2
if zero_at_fermi:
return E_arr - self.efermi, PDOS_arr
else:
return E_arr, PDOS_arr
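    # A minimal usage sketch: orbital-projected DOS for the first two atoms, with eigenvalues,
    # weights and the Fermi level taken from the matching OUTCAR. Paths are hypothetical.
    #     outcar = Outcar.from_file('OUTCAR')
    #     procar = Procar.from_file('PROCAR')
    #     E, pdos = procar.get_PDOS(outcar, atom_numbers=[0, 1], sigma=0.05, zero_at_fermi=True)
    #     # pdos has shape (nspin, len(atom_numbers), norbs, len(E))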
class Chgcar:
"""
    Class for reading CHG and CHGCAR files from VASP
    For now, the augmentation occupancy data is ignored
"""
def __init__(self, structure, charge_density, spin_density=None):
self.structure = structure
self.charge_density = charge_density
self.spin_density = spin_density
@staticmethod
def from_file(filepath):
poscar = Poscar.from_file(filepath)
structure = poscar.structure
volumetric_data = []
read_data = False
with open(filepath, 'r') as file:
for i in range(8 + structure.natoms):
file.readline()
for line in file:
line_data = line.strip().split()
if read_data:
for value in line_data:
if i < length - 1:
data[indexes_1[i], indexes_2[i], indexes_3[i]] = float(value)
i += 1
else:
data[indexes_1[i], indexes_2[i], indexes_3[i]] = float(value)
read_data = False
volumetric_data.append(data)
else:
if len(line_data) == 3:
try:
shape = np.array(list(map(int, line_data)))
                    except ValueError:  # a coordinate line, not the grid-shape line
pass
else:
read_data = True
nx, ny, nz = shape
data = np.zeros(shape)
length = np.prod(shape)
i = 0
indexes = np.arange(0, length)
indexes_1 = indexes % nx
indexes_2 = (indexes // nx) % ny
indexes_3 = indexes // (nx * ny)
if len(volumetric_data) == 1:
return Chgcar(structure, volumetric_data[0])
elif len(volumetric_data) == 2:
return Chgcar(structure, volumetric_data[0], volumetric_data[1])
else:
raise ValueError(f'The file contains more than 2 volumetric data, len = {len(volumetric_data)}')
def convert_to_cube(self, volumetric_data='charge_density'):
comment = ' Cube file was created using Electrochemistry package\n'
if volumetric_data == 'charge_density':
return Cube(data=self.charge_density,
structure=self.structure,
comment=comment+' Charge Density\n',
origin=np.zeros(3))
elif volumetric_data == 'spin_density':
return Cube(data=self.spin_density,
structure=self.structure,
comment=comment + ' Spin Density\n',
origin=np.zeros(3))
elif volumetric_data == 'spin_major':
return Cube(data=(self.charge_density + self.spin_density)/2,
structure=self.structure,
comment=comment+' Major Spin\n',
origin=np.zeros(3))
elif volumetric_data == 'spin_minor':
return Cube(data=(self.charge_density - self.spin_density)/2,
structure=self.structure,
comment=comment+' Minor Spin\n',
origin=np.zeros(3))
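    # A minimal usage sketch: export the spin density of a spin-polarized CHGCAR as a Gaussian
    # cube file. The paths are hypothetical.
    #     chgcar = Chgcar.from_file('CHGCAR')
    #     cube = chgcar.convert_to_cube('spin_density')
    #     cube.to_file('spin_density.cube')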
def to_file(self, filepath):
#TODO write to_file func
pass
class Xdatcar:
"""Class that reads VASP XDATCAR files"""
def __init__(self,
structure,
comment: str = None,
trajectory=None):
"""
Create an Xdatcar instance
Args:
structure (Structure class): a base class that contains lattice, coords and species information
comment (str): a VASP comment
            trajectory (3D np.array): contains the coordinates of all atoms along the trajectory. It has the shape
n_steps x n_atoms x 3
"""
self.structure = structure
self.comment = comment
self.trajectory = trajectory
def __add__(self, other):
"""
        Concatenates Xdatcar files (their trajectories)
Args:
other (Xdatcar class): Xdatcar that should be added to the current Xdatcar
Returns (Xdatcar class):
New Xdatcar with concatenated trajectory
"""
assert isinstance(other, Xdatcar), 'Other object must belong to Xdatcar class'
assert np.array_equal(self.structure.lattice, other.structure.lattice), 'Lattices of two files must be equal'
assert self.structure.species == other.structure.species, 'Species in two files must be identical'
assert self.structure.coords_are_cartesian == other.structure.coords_are_cartesian, \
'Coords must be in the same coordinate system'
trajectory = np.vstack((self.trajectory, other.trajectory))
return Xdatcar(self.structure, self.comment + ' + ' + other.comment, trajectory)
def add(self, other):
"""
        Concatenates Xdatcar files (their trajectories)
Args:
other (Xdatcar class): Xdatcar that should be added to the current Xdatcar
Returns (Xdatcar class):
New Xdatcar with concatenated trajectory
"""
return self.__add__(other)
def add_(self, other):
"""
        Concatenates Xdatcar files (their trajectories). This is an in-place operation; the current Xdatcar will be modified
Args:
other (Xdatcar class): Xdatcar that should be added to the current Xdatcar
"""
assert isinstance(other, Xdatcar), 'Other object must belong to Xdatcar class'
        assert np.array_equal(self.structure.lattice, other.structure.lattice), 'Lattices of two files must be equal'
assert self.structure.species == other.structure.species, 'Species in two files must be identical'
assert self.structure.coords_are_cartesian == other.structure.coords_are_cartesian, \
'Coords must be in the same coordinate system'
self.trajectory = np.vstack((self.trajectory, other.trajectory))
@property
def nsteps(self):
return len(self.trajectory)
@staticmethod
def from_file(filepath):
"""
Static method to read a XDATCAR file
Args:
filepath: path to the XDATCAR file
Returns:
Xdatcar class object
"""
file = open(filepath, 'r')
data = file.readlines()
file.close()
comment = data[0].strip()
scale = float(data[1])
lattice = np.array([[float(i) for i in line.split()] for line in data[2:5]])
if scale < 0:
# In VASP, a negative scale factor is treated as a volume.
# We need to translate this to a proper lattice vector scaling.
vol = abs(np.linalg.det(lattice))
lattice *= (-scale / vol) ** (1 / 3)
else:
lattice *= scale
name_species = data[5].split()
num_species = [int(i) for i in data[6].split()]
species = []
for name, num in zip(name_species, num_species):
species += [name] * num
n_atoms = np.sum(num_species)
n_steps = int((len(data) - 7) / (n_atoms + 1))
trajectory = np.zeros((n_steps, n_atoms, 3))
for i in range(n_steps):
atom_start = 8 + i * (n_atoms + 1)
atom_stop = 7 + (i + 1) * (n_atoms + 1)
data_step = [line.split() for line in data[atom_start:atom_stop]]
for j in range(n_atoms):
trajectory[i, j] = [float(k) for k in data_step[j]]
struct = Structure(lattice, species, trajectory[0], coords_are_cartesian=False)
return Xdatcar(struct, comment, trajectory)
def to_file(self, filepath):
file = open(filepath, 'w')
file.write(f'{self.comment}\n')
file.write('1\n')
for vector in self.structure.lattice:
file.write(f' {vector[0]} {vector[1]} {vector[2]}\n')
species = np.array(self.structure.species)
sorted_order = np.argsort(species, kind='stable')
sorted_trajectory = self.trajectory[:, sorted_order, :]
unique, counts = np.unique(species, return_counts=True)
line = ' '
for u in unique:
line += u + ' '
file.write(line + '\n')
line = ' '
for c in counts:
line += str(c) + ' '
file.write(line + '\n')
for i in range(self.nsteps):
file.write(f'Direct configuration= {i + 1}\n')
for j in range(self.structure.natoms):
file.write(f' {sorted_trajectory[i, j, 0]} '
f'{sorted_trajectory[i, j, 1]} '
f'{sorted_trajectory[i, j, 2]}\n')
file.close()
def mod_coords_to_cartesian(self):
if self.structure.coords_are_cartesian is True:
return 'Coords are already cartesian'
else:
self.trajectory = np.matmul(self.trajectory, self.structure.lattice)
self.structure.mod_coords_to_cartesian()
def mod_coords_to_box(self):
assert self.structure.coords_are_cartesian is False, 'This operation allowed only for NON-cartesian coords'
self.trajectory %= 1
self.structure.coords %= 1
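    # A minimal usage sketch: stitch two MD runs together and wrap the fractional coordinates
    # back into the unit cell. The XDATCAR paths are hypothetical.
    #     xd = Xdatcar.from_file('run1/XDATCAR') + Xdatcar.from_file('run2/XDATCAR')
    #     xd.mod_coords_to_box()
    #     xd.to_file('XDATCAR_joined')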