# Source code for ddg.optimize.he.minimize

import textwrap

import numpy as np
from scipy.optimize import minimize


def minimization_history(x, history, functional=None):
    """
    Saves the minimization history in the given iterable (history).

    Entries are dictionaries linking indices of interior cells to the
    value after the minimizing steps.

    Parameters
    ----------
    x: numpy.ndarray
        Flattened output of scipy.minimize.
    history: list
        List to store the results after each minimization step. Entries
        are dictionaries (see above).
    functional : ddg.optimize.he.functional.HalfEdgeFunctional
        Half-edge functional of minimization process. Although declared
        with a default of None, it is required — the function reads
        functional.interior_cells and functional.attr_dimension.

    Returns
    -------
    None
    """
    d = {}
    multi_dim_result = result_decorator_unflatten(x, functional)
    for cell in functional.interior_cells:
        # A 1-dimensional attribute is stored unwrapped (scalar), a
        # multidimensional one keeps its full row.
        val = (
            multi_dim_result[cell.interior_cell_index][0]
            if functional.attr_dimension == 1
            else multi_dim_result[cell.interior_cell_index]
        )
        d[cell.index] = val
    history.append(d)
def function_decorator_flatten(function, functional):
    """
    Returns a function that wraps the given function to be able to
    handle flat inputs.

    Parameters
    ----------
    function:
        Function of functional that handles multidimensional input.
    functional : ddg.optimize.he.functional.HalfEdgeFunctional
        Half-edge functional of minimization process.

    Returns
    -------
    function
    """

    def flat_input_wrapper(flat_array):
        # Reshape the flat scipy-style vector to (n, dim) before
        # delegating to the wrapped multidimensional function.
        reshaped = result_decorator_unflatten(flat_array, functional)
        return function(reshaped)

    return flat_input_wrapper
def result_decorator_unflatten(x, functional):
    """
    Reshapes the given (flat) array x to the multidimensional shape
    (n, dim) that is given by the functional's attribute dimension.

    Parameters
    ----------
    x: numpy.ndarray of shape (n*dim, )
        Flat input array.
    functional : ddg.optimize.he.functional.HalfEdgeFunctional
        Functional of minimization process; provides attr_dimension.

    Returns
    -------
    numpy.ndarray of shape (n, dim)

    Raises
    ------
    ValueError
        If the length of x is not a multiple of the attribute dimension.
    """
    dim = functional.attr_dimension
    length = len(x)
    # Exact integer divisibility test. The previous check compared
    # length / dim with length // dim in floating point, which can
    # misbehave for very large arrays; the modulo test cannot.
    if length % dim != 0:
        raise ValueError(
            textwrap.dedent(
                f"""
                The number of elements in the flat input isn't equal to
                {length // dim} * {dim} (n*dim), where n is the number of cells
                and dim the dimension of the attribute. Instead the length of
                the flat array is {length}.
                """
            )
        )
    return np.reshape(x, (length // dim, dim))
def write_to_attribute(multi_dim_result, functional):
    """
    Takes the multidimensional result of the minimization process (i.e.
    given by result_decorator_unflatten) and writes it to the
    corresponding cells given by the functional.

    Parameters
    ----------
    multi_dim_result: np.ndarray of shape(n, dim)
        Where n is the number of (interior-) cells in the minimization
        process and dim the dimension of the minimization attribute.
    functional : ddg.optimize..he.functional.HalfEdgeFunctional
        Functional of minimization process.

    Returns
    -------
    None
    """
    # Hoist loop invariants: whether the attribute is scalar-valued and
    # the name it is stored under on each cell.
    is_scalar = functional.attr_dimension == 1
    attr_name = functional.attr_name
    for cell in functional.interior_cells:
        row = multi_dim_result[cell.interior_cell_index]
        # Scalar attributes are unwrapped from their 1-element row.
        setattr(cell, attr_name, row[0] if is_scalar else row)
# TODO make history a part of the functional
# TODO add keyword argument allowing other minimization functions
def minimizer(functional, return_history=False, **kwargs):
    r"""
    Minimizer for a given functional.

    Minimizes value of interior cells where these cells are being computed
    as the complement of functional.boundary_cells. The values must be
    stored in a cell attribute, referenced by functional.attr_name.

    The scipy.optimize.minimize function only accepts functionals with
    1-dimensional input. For functionals that have multidimensional input,
    the minimizer acts as a decorator and wraps the functional to convert
    the input to a multidimensional one.

    The current values are being used as starting values and the methods
    functional.gradient and functional.hessian are being used in
    optimization if they are implemented.

    The return_history keyword allows to store the result of the
    minimization process after each step.

    Parameters
    ----------
    functional : ddg.optimize.he.functional.HalfEdgeFunctional
        Functional to minimize.
    return_history: bool (default=False)
        If True, additionally return the list of per-step results.
    \*\*kwargs:
        Keyword arguments for scipy.optimize.minimize. By default
        jac=functional.gradient and hess=functional.hessian if existent.

    Returns
    -------
    scipy.optimize.OptimizeResult
        If return_history is True, a tuple (result, history) instead.
        Also updates the desired values of the surface on success.

    Warnings
    --------
    If the return_history argument is True, a callback function will be
    initialized to store the step wise results of the minimization. This
    will overwrite any callback function that might be given by the
    \*\*kwargs input.

    See Also
    --------
    ddg.optimize.minimize.minimization_history
    """
    # Current attribute values act as the starting point; scipy needs
    # them as a flat 1-d vector.
    x0 = np.array(
        [getattr(v, functional.attr_name) for v in functional.interior_cells]
    )
    kwargs.update({"x0": x0.flatten()})
    # Forward analytic derivatives when the functional implements them;
    # they also have to be wrapped to accept flat input.
    if hasattr(functional, "gradient"):
        kwargs.update(
            {"jac": function_decorator_flatten(functional.gradient, functional)}
        )
    if hasattr(functional, "hessian"):
        kwargs.update(
            {"hess": function_decorator_flatten(functional.hessian, functional)}
        )
    if return_history:
        history = []
        # NOTE: deliberately overwrites any user-supplied callback (see
        # Warnings above).
        kwargs["callback"] = lambda x: minimization_history(x, history, functional)
    result = minimize(
        function_decorator_flatten(functional.evaluate, functional), **kwargs
    )
    # Only write the optimized values back to the cells when the
    # optimizer reports convergence.
    if result.success:
        multi_dim_result = result_decorator_unflatten(result.x, functional)
        write_to_attribute(multi_dim_result, functional)
    if return_history:
        return result, history
    return result