diff --git a/chemtools/__init__.py b/chemtools/__init__.py
index fa23ccdc..1dca91bf 100644
--- a/chemtools/__init__.py
+++ b/chemtools/__init__.py
@@ -30,6 +30,7 @@
from chemtools.denstools import *
from chemtools.utils import *
from chemtools.outputs import *
+from chemtools.topology import *
import horton
diff --git a/chemtools/topology/__init__.py b/chemtools/topology/__init__.py
index 96e4a896..e89beba9 100644
--- a/chemtools/topology/__init__.py
+++ b/chemtools/topology/__init__.py
@@ -23,4 +23,6 @@
"""The Scalar Field Topological Analysis Module."""
-from chemtools.topology import *
+from chemtools.topology.critical import *
+from chemtools.topology.ode import *
+from chemtools.topology.point import *
diff --git a/chemtools/topology/critical.py b/chemtools/topology/critical.py
index 96a54ec4..2c4f1f61 100644
--- a/chemtools/topology/critical.py
+++ b/chemtools/topology/critical.py
@@ -26,15 +26,18 @@
import warnings
import numpy as np
-from scipy.spatial import cKDTree
+from scipy.optimize import root
+from scipy.spatial import cKDTree, KDTree
+from scipy.spatial.distance import cdist
from chemtools.topology.point import CriticalPoint
+from chemtools.topology.ode import bond_paths_rk45
class Topology(object):
"""Topology class for searching critical points given scalar function."""
- def __init__(self, func, func_grad, func_hess, points, coords=None, n_neighbours=4):
+ def __init__(self, func, func_grad, func_hess, points=None, coords=None, n_neighbours=4):
r"""Initialize Topology class instance.
Parameters
@@ -45,29 +48,35 @@ def __init__(self, func, func_grad, func_hess, points, coords=None, n_neighbours
Method for computing the gradient vector of scalar function.
func_hess : callable[np.ndarray(N, 3) -> np.ndarray(3, 3)]
Method for computing the hessian matrix of scalar function.
- points : np.ndarray(M, 3)
- Cartesian coordinates of :math:`M` initial guess points.
+ points : np.ndarray(M, 3), optional
+ Cartesian coordinates of :math:`M` initial guess points. If `coords` is not None,
+        then the initial guesses are generated by calculating midpoints between
+ every 2, 3, and 4 atoms.
coords : np.ndarray(N, 3), optional
Cartesian coordinates of :math:`N` atomic centers to use as additional initial guesses.
n_neighbours: int, optional
Number of polyhedron vertices.
"""
- if points.ndim != 2 and points.shape[1] != 3:
+ if points is not None and (points.ndim != 2 and points.shape[1] != 3):
raise ValueError("Argument points should be a 2D-array with 3 columns!")
- if points.shape[0] < 4:
+ if points is not None and points.shape[0] < 4:
raise ValueError("At least 4 points are needed for critical point search!")
- self._points = points
if coords is not None and coords.ndim != 2 and coords.shape[1] != 3:
raise ValueError("Argument coords should be a 2D-array with 3 columns.")
if coords is not None:
+ if points is None:
+ points = self._generate_initial_guesses(coords, nbhr=10)
self._coords = coords
- self._kdtree = cKDTree(np.vstack((self._coords, self._points)))
+ self._kdtree = cKDTree(np.vstack((self._coords, points)))
else:
+ if points is None:
+ raise ValueError(f"Points {points} can't be None, while {coords} is also None.")
self._coords = None
self._kdtree = cKDTree(points)
+ self._points = points
self.func = func
self.grad = func_grad
self.hess = func_hess
@@ -75,6 +84,15 @@ def __init__(self, func, func_grad, func_hess, points, coords=None, n_neighbours
self._neighbours = self._polyhedron_coordinates(n_neighbours)
# dictionary for storing critical points using (rank, signature) as key
self._cps = {}
+ # dictionary for storing bond paths of each bond-critical point using the index
+ # of critical point as key and the items are the list: [ndarray(M_1, 3), ndarray(M_2, 3)]
+ # signifying the two different bond paths.
+ self._bp = {}
+
+ @property
+ def bp(self):
+ """Dictionary of bond paths for each bond-critical point. Keys are indices of `bcp`."""
+ return self._bp
@property
def cps(self):
@@ -194,3 +212,392 @@ def _polyhedron_coordinates(n_vertices):
else:
raise NotImplementedError("Number of vertices {} is not supported".format(n_vertices))
return coords
+
+ def newton_step(self, pts, alpha=1.0, use_log=False):
+ def grad_func_log(dens_vals, grad_vals):
+ return grad_vals / dens_vals[:, None]
+
+ def hess_func_log(dens_vals, grad_vals, hess_vals):
+ grad_mat = np.einsum("ni,nj->nij", grad_vals, grad_vals)
+ return hess_vals / dens_vals[:, None, None] - grad_mat / dens_vals[:, None, None] ** 2.0
+
+ dens = self.func(pts)
+ grad = self.grad(pts)
+ hess = self.hess(pts)
+ if use_log:
+ grad_log = grad_func_log(dens, grad)
+ hess_log = hess_func_log(dens, grad, hess)
+ solution = np.linalg.solve(hess_log, grad_log)
+ else:
+ solution = np.linalg.solve(hess, grad)
+
+ return pts - alpha[:, None] * solution, np.linalg.norm(grad, axis=1), hess
+
+ def remove_duplicates(self, pts_converged, decimal_uniq, rad_uniq):
+ r"""
+ Remove duplicates found in `pts_converged`.
+
+ The procedure is as follows:
+ 1. Points are rounded to `decimal_uniq` decimal place and grouped together. If
+ the distance between points is still less than `rad_uniq`, then step 2 is performed.
+    2. Using a KDTree, a ball of radius `rad_uniq` is constructed around each point and
+ the point within the ball with the smallest gradient norm is selected as
+ the "unique" point. This process is iterated until all points are within a ball.
+
+ """
+ # Round to `decimal_uniq` decimal place and remove any non-unique points.
+ test, indices = np.unique(
+ np.round(pts_converged, decimals=decimal_uniq),
+ axis=0,
+ return_index=True
+ )
+ pts_converged = test[np.argsort(indices)]
+
+ # Get final gradients
+ final_gradients = np.linalg.norm(self.grad(pts_converged), axis=1)
+
+ # See if any duplicates were found then run the kdtree algorithm
+ dist = cdist(pts_converged, pts_converged)
+ dist[np.diag_indices(len(pts_converged))] = 1.0
+ maxim = np.any(dist < rad_uniq, axis=1)
+ if np.any(maxim):
+ indices = set(range(len(pts_converged)))
+ tree = KDTree(pts_converged)
+ indices_include = []
+ while len(indices) != 0:
+ index = indices.pop()
+ query = tree.query_ball_point(pts_converged[index], r=rad_uniq)
+ gradient_ball = final_gradients[query]
+ sort_indices = np.argsort(gradient_ball)
+ indices_include.append(query[sort_indices[0]]) # Pick the point with smallest gradient
+ indices = indices.difference(query)
+
+ final_gradients = final_gradients[indices_include]
+ pts_converged = pts_converged[indices_include]
+
+ return pts_converged, final_gradients
+
+ def _generate_initial_guesses(self, atom_coords, nbhr=10):
+ r"""
+ Generates initial guesses for QTAIM points.
+
+ First adds midpoints between all atom pairs. Next grabs `nbhr` atoms from
+ each atom and computes midpoints between every three atoms, and every four
+ atoms. Finally, removes any duplicates.
+
+ Parameters
+ ----------
+ atom_coords: ndarray(M, 3)
+ The atomic coordinates of the atoms.
+ nbhr: int, optional
+ The number of neighbors to calculate the midpoints for every three, and four atoms.
+
+ Returns
+ --------
+ ndarray(N, 3)
+        Returns possible initial guesses for the Newton-Raphson algorithm.
+
+ """
+ natoms = len(atom_coords)
+
+ # Start by adding the coordinates of the atom
+ init_pts = atom_coords.copy()
+
+ # Add the midpoint between any two atoms
+ arr1 = init_pts.copy()[:, np.newaxis, :]
+ init_pts = np.vstack((init_pts, np.reshape((arr1 + init_pts) / 2.0, (len(init_pts) * len(init_pts), 3))))
+
+ # Construct a KDTree, and search for (usually) ten neighbours at a time.
+ tree = KDTree(atom_coords)
+ nbhrs = min(natoms, nbhr)
+
+ # Go through each atom
+ for i in range(natoms):
+ # Grab `nbhrs` closest atoms of the current atom.
+ _, indices = tree.query(atom_coords[i], k=nbhrs)
+
+ # Using only the `nbhrs` atoms, construct midpoints every three atoms.
+ coords = atom_coords[indices]
+ arr1 = coords.copy()[:, np.newaxis, np.newaxis, :]
+ arr2 = coords.copy()[np.newaxis, :, np.newaxis, :]
+ init_pts = np.vstack((init_pts, np.reshape((arr1 + arr2 + coords) / 3.0, (nbhrs ** 3, 3))))
+
+ # Using only the `nbhrs` atoms, construct midpoints every four atoms.
+ arr1 = coords.copy()[:, np.newaxis, np.newaxis, np.newaxis, :]
+ arr2 = coords.copy()[np.newaxis, :, np.newaxis, np.newaxis, :]
+ arr3 = coords.copy()[np.newaxis, np.newaxis, :, np.newaxis, :]
+ init_pts = np.vstack((init_pts, np.reshape((arr1 + arr2 + arr3 + coords) / 4.0, (nbhrs ** 4, 3))))
+
+ # Remove any duplicates
+ init_pts = np.unique(np.round(init_pts, decimals=10), axis=0)
+
+ return init_pts
+
+ def find_critical_points_vectorized(
+ self,
+ include_centers=None,
+ xtol=1e-8,
+ ftol=1e-8,
+ ss_rad=0.1,
+ init_norm_cutoff=0.5,
+ dens_cutoff=1e-5,
+ decimal_uniq=12,
+ rad_uniq=0.01,
+ maxiter=250,
+ failure_use_scipy=True,
+ use_log=False,
+ verbose=True
+ ):
+ r"""
+ Find and store the critical points with a vectorization approach.
+
+ Parameters
+ ----------
+ include_centers: ndarray(M, 3), optional
+ Used to include points near these centers (e.g. atomic coordinates)
+ xtol: float, optional
+ Point converges if the difference between two iterations is less than `xtol`
+ in all three dimensions. Also needs to satisfy `ftol` convergence.
+ ftol: float, optional
+ Point converges if the norm of the gradient is less than `ftol`. Also needs
+ to satisfy `xtol` convergence.
+ ss_rad: float, optional
+ Points that are 2`ss_rad` away from the atomic coordinates are included as
+ initial guesses.
+ init_norm_cutoff: float, optional
+ Points whose gradient norm is less than `init_norm_cutoff` are excluded when constructing
+ the initial atomic grid.
+ dens_cutoff: float, optional
+ If the point diverges during the critical point finding process then it excludes it based
+ on its density value being less than `dens_cutoff`.
+        decimal_uniq: int, optional
+ Rounds each critical point to this `decimal_uniq` decimal place. Used to condense points
+ together that are close together.
+ rad_uniq: float, optional
+ Points that are `rad_uniq` apart, are grouped together and the one with the smallest
+ gradient norm is picked. Used to condense points together that are close together
+        use_log: bool, optional
+ If true, then uses the logarithm of the electron density. Speeds-up convergence.
+        verbose: bool, optional
+ If true, then prints the sum of the norm of the gradient per iteration and number of
+ points that didn't converge.
+
+ Warns
+ -----
+ RuntimeWarning
+ - Poincare-Hopf equation isn't satisfied. This implies not all critical points were
+ found.
+            - The norms of the gradients of the final critical points are not all less than 1e-5.
+
+ Notes
+ -----
+        - Newton-Raphson iteration is used to find the roots of the gradient.
+
+ - The algorithm is as follows:
+ 1. Constructs an atomic grid over each atomic center.
+ 2. Points that are close to center and that have small gradients are
+ included to be initial guesses. Points whose density values is small are not.
+ 3. While-loop is done until each point from the initial guess converged.
+ 4. Points whose density values are too small are removed from the iteration process.
+ 5. If points didn't converge then Scipy is used instead but points are reduced
+ considerably since Scipy is slower as it doesn't accept vectorization.
+ 6. Points that are identical are merged together by first rounding to some decimal
+ places and then a KDTree is used to group the points into small balls and the
+ points with the smallest gradient are picked out of each ball.
+
+ """
+ # Construct set of points around each center
+ # from grid.onedgrid import OneDGrid
+ # from grid.atomgrid import AtomGrid
+ # u_bnd_rad=1.5
+ # natoms = len(atom_coords)
+ # atom_grids_list = []
+ # rad_grid = np.arange(0.0, u_bnd_rad, ss_rad)
+ # rgrid = OneDGrid(points=rad_grid, weights=np.empty(rad_grid.shape))
+ # for i in range(natoms):
+ # atomic_grid = AtomGrid.from_preset(10, preset="fine", rgrid=rgrid, center=atom_coords[i])
+ # atom_grids_list.append(atomic_grid.points)
+ # pts = np.vstack(atom_grids_list)
+
+ # Use density to remove points greater than certain value (0.001 au)
+ cut_off = self.func(self._points) > dens_cutoff
+ pts = self._points[cut_off, :]
+
+ # Include points that have small gradients and close to the centers provided
+ # Useful in cases where loads of naive points are provided
+ norm_grad = np.linalg.norm(self.grad(pts), axis=1)
+ if include_centers is not None:
+ close_centers = np.any(cdist(pts, include_centers) < 2 * ss_rad, axis=1)
+ pts_decis = close_centers | (norm_grad < init_norm_cutoff)
+ pts = pts[pts_decis]
+ norm_grad = norm_grad[pts_decis]
+
+ if verbose:
+ print(f"Start Critical Points Finding.")
+ print(f"Number of initial points: {len(pts)}")
+
+        # Solve critical-point finding via Newton-Raphson with backtracking
+ # Keep going until all points converge
+ p1, p0, grad_norm0 = np.ones(pts.shape) * 1000.0, pts, norm_grad
+ alpha = np.ones(len(p1)) # Initial Step-Sizes
+ converged_pts = np.empty((0, 3), dtype=float)
+ niter = 0
+
+ count_not_changed = 0
+ prev_number_pts = len(p1)
+ while len(p1) != 0 and niter < maxiter:
+            # Take Newton-Raphson Step
+ p1, grad_norm1, hess = self.newton_step(p0, alpha=alpha, use_log=use_log)
+
+ if verbose:
+ print(f"Iteration: {niter + 1}, "
+ f"Sum Gradient Norm of pts left: {(np.sum(grad_norm1))} "
+ f"Number of pts left: {len(p1)}")
+ # TODO: Add Armijo condition
+
+ # Remove points whose density is very small and gradient
+ dens1 = self.func(p1)
+ include = (dens1 > dens_cutoff)
+ p1 = p1[include, :]
+ p0 = p0[include, :]
+ alpha = alpha[include]
+ grad_norm1 = grad_norm1[include]
+
+ # Store points that converge in its 3 dimensions
+ err = np.abs(p1 - p0)
+ did_converge = np.all(err < xtol, axis=1) & (grad_norm1 < ftol)
+ converged_pts = np.vstack((converged_pts, p1[did_converge, :]))
+
+ # Remove points that did converge
+ didnt_converge = np.bitwise_not(did_converge)
+ p1 = p1[didnt_converge, :]
+ alpha = alpha[didnt_converge]
+ grad_norm1 = grad_norm1[didnt_converge]
+
+            # If no additional points converged, increase count
+ if len(p1) == prev_number_pts:
+ count_not_changed += 1
+ else:
+ # If points did converge, set alpha/stepsize back to one
+ prev_number_pts = len(p1)
+ count_not_changed = 0
+ alpha[:] = 1.0
+
+ # If the number of times points didn't converge per 30 iterations
+ # then decrease step-size by half
+ if count_not_changed > 0 and count_not_changed % 30 == 0:
+ # Change step-size
+ print("Change stepsize ", alpha[0], count_not_changed)
+ alpha /= 2.0
+
+ # Update variables for next iteration
+ p0 = p1
+ niter += 1
+
+ if niter == maxiter:
+ if failure_use_scipy:
+ p1, _ = self.remove_duplicates(p1, decimal_uniq, rad_uniq)
+ if verbose:
+ print(f"Convergence not achieved, attempt using Scipy."
+ f" Number of points didn't converge: {len(p1)}")
+
+ # Try Scipy instead: Go through each pt individually.
+ # Note this is slower but convergence is guaranteed.
+ def log_grad(pt):
+ grad_vals = self.grad(np.array([pt]))[0]
+ dens_vals = self.func(np.array([pt]))[0]
+
+ grad_mat = np.outer(grad_vals, grad_vals)
+ if dens_vals < 1e-10:
+ dens_vals = 1e-10
+ return grad_vals / dens_vals
+
+ other_converged_pts = []
+ for x in p1:
+ sol = root(log_grad, x0=x, method="anderson",
+ options={"fatol": ftol, "disp":True, "maxiter": 1000})
+ assert sol.success, f"One of the roots did not converge using scipy {sol}"
+ other_converged_pts.append(sol.x)
+
+ converged_pts = np.vstack((converged_pts, np.array(other_converged_pts, dtype=float)))
+ else:
+ raise RuntimeError(f"Maximum number of iterations {niter} == {maxiter} was reached.")
+
+ # Remove duplicates from entire set of points.
+ converged_pts, final_gradients = self.remove_duplicates(converged_pts, decimal_uniq, rad_uniq)
+
+ if verbose:
+ print(f"Final number of points found {len(converged_pts)}")
+
+ # Check the gradient are all small
+ if np.any(final_gradients > 1e-5):
+ warnings.warn("Gradient of the final critical points are not all smaller than 1e-5.",
+ RuntimeWarning)
+
+ # compute rank & signature of critical point
+ hess = self.hess(converged_pts)
+ eigs, evecs = np.linalg.eigh(hess)
+ for i in range(len(converged_pts)):
+ cp = CriticalPoint(converged_pts[i], eigs[i], evecs[i], 1e-4)
+ self._cps.setdefault((cp.rank[0], cp.signature[0]), []).append(cp)
+
+ # check Poincare–Hopf equation
+ if not self.poincare_hopf_equation:
+ warnings.warn("Poincare–Hopf equation is not satisfied.", RuntimeWarning)
+
+ def find_bond_paths(self, eps_dir=1e-4, tol=1e-8, ss_0=1e-8, max_ss=0.25, maxiter=2000):
+ r"""
+ Find bond paths of each bond critical point.
+
+ The critical points must be found first.
+        Runge-Kutta of order 4(5) with adaptive step-size is used to solve for the
+ bond path.
+
+ Parameters
+ ----------
+ eps_dir : float, optional
+ The displacement in the direction in the eigenvector of the Hessian at the bond
+ critical point.
+ tol : float, optional
+ Tolerance for the adaptive step-size.
+ ss_0 : float, optional
+ The initial step-size of the ODE (RK45) solver.
+ max_ss : float, optional
+ Maximum step-size of the ODE (RK45) solver.
+ maxiter : int, optional
+ The maximum number of iterations of taking a step in the ODE solver.
+
+ """
+ if not self._cps:
+ raise RuntimeError(f"Dictionary empty, critical points should be found first.")
+ if eps_dir < 0.0:
+ raise ValueError(f"eps_dir {eps_dir} must be positive.")
+
+ all_pts = []
+ for i, cp in enumerate(self.bcp):
+ coordinate = cp.coordinate
+ eigs = cp.eigenvalues
+ evecs = cp._eigenvectors
+ if np.any(eigs[0] < -1e-8):
+ bond_path_dir = evecs[:, eigs[0] > 0.0].T
+
+ for b in bond_path_dir:
+ dir1 = coordinate + eps_dir * b
+ dir2 = coordinate - eps_dir * b
+ all_pts.append(dir1)
+ all_pts.append(dir2)
+ all_pts = np.array(all_pts)
+ bond_paths = bond_paths_rk45(
+ all_pts,
+ self.func,
+ self.grad,
+ ss_0=ss_0,
+ max_ss=max_ss,
+ tol=tol,
+ maxiter=maxiter
+ )
+
+ count = 0
+ for i_cp in range(len(self.bcp)):
+ self._bp[i_cp] = [np.array(bond_paths[count]), np.array(bond_paths[count + 1])]
+ count += 2
diff --git a/chemtools/topology/ode.py b/chemtools/topology/ode.py
new file mode 100644
index 00000000..fe6c89f9
--- /dev/null
+++ b/chemtools/topology/ode.py
@@ -0,0 +1,211 @@
+# -*- coding: utf-8 -*-
+# ChemTools is a collection of interpretive chemical tools for
+# analyzing outputs of the quantum chemistry calculations.
+#
+# Copyright (C) 2016-2019 The ChemTools Development Team
+#
+# This file is part of ChemTools.
+#
+# ChemTools is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# ChemTools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see
+#
+# --
+import numpy as np
+
+__all__ = [
+ "bond_paths_rk45",
+]
+
+
+def _get_normalized_gradient_func(grad_func):
+ r"""Returns the normalized version of the function."""
+ def norm_grad_func(x):
+ gradient = grad_func(x)
+ return gradient / np.linalg.norm(gradient, axis=1)[:, None]
+ return norm_grad_func
+
+
+def _RK45_step(pts, grad_func, step_size, grad0=None):
+ r"""
+    Runge-Kutta fourth- and fifth-order step for the following ODE system:
+
+ .. math::
+ \frac{d(r(s))}{ds} = \nabla \rho (r(s)),
+
+ where :math:`\nabla \rho(x)` is the gradient of a function.
+
+ Parameters
+ ----------
+ pts : ndarray(M, 3)
+ Points to take a step in.
+ grad_func: callable(ndarray(M,3), ndarray(M,3))
+ Callable function that takes in points and obtains the gradient.
+ step_size: float
+ Stepsize for the step
+ grad0: ndarray(M, 3)
+ If the gradient is already computed on `pts`, then avoids re-computing it.
+
+ Returns
+ -------
+ (ndarray(M,3), ndarray(M,3)):
+ Returns fourth-order and fifth-order Runge-Kutta step, respectively.
+
+ """
+ # Fourth and Fifth-Order Runge-Kutta
+ if grad0 is None:
+ k1 = step_size * grad_func(pts)
+ else:
+ k1 = step_size * grad0
+    k2 = step_size * grad_func(pts + 0.25 * k1)
+ k3 = step_size * grad_func(pts + (3.0 / 32) * k1 + (9.0 / 32.0) * k2)
+ k4 = step_size * grad_func(pts + (1932 / 2197) * k1 - (7200 / 2197) * k2 + (7296 / 2197) * k3)
+ k5 = step_size * grad_func(
+ pts + (439 / 216) * k1 - 8.0 * k2 + (3680 / 513) * k3 - (845 / 4104) * k4
+ )
+ k6 = step_size * grad_func(
+ pts
+ - (8.0 / 27.0) * k1
+ + 2.0 * k2
+ - (3544 / 2565) * k3
+ + (1859 / 4104) * k4
+ - (11 / 40) * k5
+ )
+
+    # Get the fourth- and fifth-order approximations
+    y_four = pts + (25.0 / 216.0) * k1 + (1408 / 2565) * k3 + (2197 / 4104) * k4 - k5 / 5.0
+ y_five = (
+ pts +
+ (16.0 / 135.0) * k1
+ + (6656 / 12825) * k3
+ + (28561 / 56430) * k4
+ - (9.0 / 50.0) * k5
+ + (2.0 / 55.0) * k6
+ )
+ return y_four, y_five
+
+
+def bond_paths_rk45(
+ initial_pts,
+ dens_func,
+ grad_func,
+ ss_0=1e-7,
+ tol=1e-7,
+ max_ss=0.25,
+ tol_conv=1e-10,
+ maxiter=2000
+):
+ r"""
+    Solves the following ODE using Runge-Kutta of order 4(5) with adaptive step-size
+
+ .. math::
+        \frac{d r(t)}{dt} = \frac{\nabla \rho(r(t))}{\lVert \nabla \rho(r(t)) \rVert}
+
+ over a set of points.
+
+ Parameters
+ ----------
+ initial_pts: ndarray(N, 3)
+ Initial points to solve for steepest-ascent/backtracing.
+ dens_func: callable(ndarray(N,3), ndarray(N))
+ The electron density function.
+ grad_func: callable(ndarray(N,3), ndarray(N,3))
+ The gradient of the electron density.
+    ss_0: float or ndarray(N, 1), optional
+        The initial step-size of the ODE (RK45) solver. If given as an array,
+        it must have shape (N, 1), holding a separate initial step-size for
+        each of the N initial points.
+    tol: float, optional
+        Tolerance for the adaptive step-size update: the step-size is reduced
+        whenever the gap between the fourth- and fifth-order Runge-Kutta
+        estimates is large relative to this tolerance.
+    max_ss: float, optional
+        Maximum step-size of the ODE (RK45) solver; the adaptive step-size is
+        capped at this value.
+    tol_conv: float, optional
+        Tolerance for the convergence of a point from an ODE step. A point is
+        considered converged (and removed) once successive iterates differ by
+        less than this value in every Cartesian component.
+    maxiter: int, optional
+        The maximum number of iterations of taking a step in the ODE solver.
+        A RuntimeError is raised if this is reached, which may indicate a
+        non-nuclear attractor causing the ODE to cycle between two points.
+
+ Returns
+ -------
+    list of list of ndarray(3,):
+        For each of the N initial points, the ordered sequence of points traced
+        along its steepest-ascent (gradient) path by the ODE solver.
+
+ """
+ norm_grad_func = _get_normalized_gradient_func(grad_func)
+
+ numb_pts = initial_pts.shape[0]
+ if isinstance(ss_0, float):
+ ss = np.ones((numb_pts, 1)) * ss_0
+ elif isinstance(ss_0, np.ndarray):
+ if not ss_0.shape[1] == 1:
+ raise ValueError(f"Steps-size {ss_0.shape} should have shape of the form (N, 1).")
+ ss = ss_0.copy()
+ else:
+ raise TypeError(f"Step-size ss_0 {type(ss_0)} should have type float or array.")
+
+ pts = initial_pts.copy()
+ dens_vals0 = dens_func(initial_pts)
+
+ not_found_indices = np.arange(numb_pts)
+ niter = 0 # Number of iterations
+ grad0 = norm_grad_func(pts) # Avoids re-computing the gradient twice, used to check for NNA
+ pts_curr = pts.copy()
+ gradient_paths = [[] for _ in range(len(pts_curr))]
+ while len(not_found_indices) != 0:
+ if niter == maxiter:
+ raise RuntimeError(
+ f"Number of iterations reached maximum {niter}, "
+ f"this may be because of a non-nuclear attractor (NNA) which may cause the ODE "
+ f"to cycle between two points. Repeat this calculation by including the "
+ f"non-nuclear attractor to the list of critical points."
+ )
+ y_four, y_five = _RK45_step(pts_curr, norm_grad_func, ss, grad0)
+
+ # Update step-size
+ ss = (tol * ss / (2.0 * np.linalg.norm(y_five - y_four, axis=1)[:, None])) ** 0.25
+ ss[ss > max_ss] = max_ss
+
+ # Get density values and if the density-values decreased, reduce step-size
+ dens_vals1 = dens_func(y_five)
+ indices = np.where(dens_vals1 <= dens_vals0)[0]
+ if len(indices) != 0:
+ y_five[indices, :] = pts_curr[indices, :]
+ ss[indices] *= 0.25
+
+ for i, global_index_pt in enumerate(not_found_indices):
+ if i not in indices:
+ gradient_paths[global_index_pt].append(y_five[i])
+
+ # Check any points are within the beta-spheres and remove them if they converged.
+ converged = np.where(np.all(np.abs(y_five - pts_curr) < tol_conv, axis=1))[0]
+ if len(converged) != 0:
+ not_found_indices = np.delete(not_found_indices, converged)
+ y_five = np.delete(y_five, converged, axis=0)
+ ss = np.delete(ss, converged)[:, None]
+ dens_vals1 = np.delete(dens_vals1, converged)
+
+ if len(y_five) == 0:
+ break
+ # Update next iteration
+ grad0 = norm_grad_func(y_five)
+ pts_curr = y_five.copy()
+ dens_vals0 = dens_vals1
+ niter += 1
+
+ return gradient_paths
diff --git a/chemtools/topology/point.py b/chemtools/topology/point.py
index e992373e..66cf4398 100644
--- a/chemtools/topology/point.py
+++ b/chemtools/topology/point.py
@@ -170,3 +170,8 @@ def __init__(self, coordinate, eigenvalues, eigenvectors, eps=1e-15):
def coordinate(self):
"""Cartesian coordinate of critical point."""
return self._coord
+
+ @property
+ def eigenvectors(self):
+ """Eigenvectors of the critical point."""
+ return self._eigenvectors
diff --git a/chemtools/topology/test/test_critical.py b/chemtools/topology/test/test_critical.py
index 9ff49461..6aea618f 100644
--- a/chemtools/topology/test/test_critical.py
+++ b/chemtools/topology/test/test_critical.py
@@ -1,11 +1,105 @@
+# -*- coding: utf-8 -*-
+# ChemTools is a collection of interpretive chemical tools for
+# analyzing outputs of the quantum chemistry calculations.
+#
+# Copyright (C) 2016-2019 The ChemTools Development Team
+#
+# This file is part of ChemTools.
+#
+# ChemTools is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# ChemTools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see
+#
+# --
"""Test critical point finder."""
+
+import numpy as np
+import pathlib
+import pytest
+from scipy.spatial.distance import cdist
+
+from chemtools.wrappers.molecule import Molecule
+from chemtools.wrappers.grid import MolecularGrid
+from chemtools.topology.critical import Topology
+
+
+def _run_critical_point_algorithm(fchk, generate_grid=False):
+ file_path = pathlib.Path(__file__).parent.resolve().__str__()[:-13]
+ file_path += "data/examples/" + fchk
+
+ mol = Molecule.from_file(file_path)
+
+ if generate_grid:
+ # Generate points
+ pts = MolecularGrid.from_molecule(mol).points
+ centers = None
+ else:
+ pts = None
+ centers = mol.coordinates
+
+ result = Topology(
+ mol.compute_density,
+ mol.compute_gradient,
+ mol.compute_hessian,
+ pts,
+ centers
+ )
+ result.find_critical_points_vectorized(mol.coordinates, verbose=False)
+ return mol, result
+
+@pytest.mark.parametrize(
+ "fchk, numb_bcp, generate_grid", [
+ ("h2o.fchk", 2, True), ("nh3.fchk", 3, True), ("ch4.fchk", 4, True),
+ ("h2o.fchk", 2, False), ("nh3.fchk", 3, False), ("ch4.fchk", 4, False),
+ ]
+)
+def test_critical_points_has_correct_number(fchk, numb_bcp, generate_grid):
+ mol, result = _run_critical_point_algorithm(fchk, generate_grid)
+
+ assert len(result.bcp) == numb_bcp
+ assert len(result.nna) == len(mol.coordinates)
+ assert len(result.rcp) == 0
+ assert len(result.ccp) == 0
+ assert len(result.cps) == (len(result.nna) + len(result.bcp))
+ assert result.poincare_hopf_equation == 1
+
+@pytest.mark.parametrize(
+ "fchk, numb_bcp", [("h2o.fchk", 2), ("nh3.fchk", 3), ("ch4.fchk", 4)]
+)
+def test_bond_paths_terminate_at_maxima(fchk, numb_bcp):
+ r"""Test that the bond paths terminate at a maxima"""
+ mol, result = _run_critical_point_algorithm(fchk)
+ result.find_bond_paths(max_ss=1e-3)
+
+ # Test there is only two directions for each bcp
+ for i in range(0, numb_bcp):
+ assert len(result.bp[i]) == 2
+
+ # Test the termination point is an atomic coordinate
+ for i in range(0, numb_bcp):
+ for j in range(0, 2):
+ bond_path1 = result.bp[i][j][-1, :]
+ dist = np.linalg.norm(bond_path1 - mol.coordinates, axis=1)
+ assert np.any(dist < 0.1)
+
+
"""
from unittest import TestCase
from chemtools.topology.critical import Topology, CriticalPoint
import numpy as np
+
from numpy.testing import assert_allclose