diff --git a/doc/api/history.rst b/doc/api/history.rst
index 5c7c231b..90e25857 100644
--- a/doc/api/history.rst
+++ b/doc/api/history.rst
@@ -58,7 +58,8 @@ In this case, the history file would have the following layout::
 
 The main optimization history is indexed via call counters, in this example ``0`` and ``1``.
 Note that they do not match the major/minor iterations of a given optimizer, since gradient evaluations are stored separate from the function evaluation.
-For SNOPT, a number of other values can be requested and stored in each major iteration, such as the feasibility and optimality from the SNOPT print out file.
+For SNOPT and IPOPT, a number of other values can be requested and stored at each major iteration, such as the feasibility and optimality.
+See the SNOPT and IPOPT documentation pages for more details.
 
 API
diff --git a/doc/optimizers/IPOPT_options.yaml b/doc/optimizers/IPOPT_options.yaml
index 271e7953..a80402f7 100644
--- a/doc/optimizers/IPOPT_options.yaml
+++ b/doc/optimizers/IPOPT_options.yaml
@@ -10,3 +10,25 @@ linear_solver:
   desc: The linear solver used.
 sb:
   desc: This is an undocumented option which suppresses the IPOPT header from being printed to screen every time.
+save_major_iteration_variables:
+  desc: |
+    This option is unique to the Python wrapper, and takes a list of values which can be saved at each major iteration to the History file.
+    The possible values are:
+
+    - ``alg_mod``: algorithm mode (0 for regular, 1 for restoration)
+    - ``d_norm``: infinity norm of the primal step
+    - ``regularization_size``: regularization term for the Hessian of the Lagrangian
+    - ``ls_trials``: number of backtracking line search iterations
+    - ``g_violation``: vector of constraint violations
+    - ``grad_lag_x``: gradient of the Lagrangian
+
+    In addition, a set of default parameters is saved to the history file and cannot be changed. These are:
+
+    - ``inf_pr``: primal infeasibility
+    - ``inf_du``: dual infeasibility (optimality measure)
+    - ``mu``: barrier parameter
+    - ``alpha_pr``: step size for primal variables
+    - ``alpha_du``: step size for dual variables
+
+    pyOptSparse uses the same parameter names as IPOPT and cyipopt.
+    Detailed descriptions of these parameters can be found in their documentation.
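For orientation, this is roughly how the new option is meant to be exercised from user code. A minimal sketch, assuming IPOPT is installed and an `Optimization` problem `optProb` already exists; the requested variables and the history file name are placeholders, not part of this patch:

```python
from pyoptsparse import OPT

# Request two of the optional per-iteration quantities on top of the always-saved defaults.
opt = OPT("IPOPT", options={"save_major_iteration_variables": ["d_norm", "ls_trials"]})
# sol = opt(optProb, sens="FD", storeHistory="ipopt_demo.hst")
```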
diff --git a/examples/hs015VarPlot.py b/examples/hs015VarPlot.py
index 932a08f6..89c0e17a 100644
--- a/examples/hs015VarPlot.py
+++ b/examples/hs015VarPlot.py
@@ -74,4 +74,32 @@
 plt.xlabel("x1")
 plt.ylabel("x2")
 plt.title("Simple optimizer comparison")
+
+# Plot optimality and feasibility history for SNOPT and IPOPT
+list_opt_with_optimality = [opt for opt in db.keys() if opt in ["ipopt", "snopt"]]
+if len(list_opt_with_optimality) > 0:
+    fig, axs = plt.subplots(2, 1)
+
+    for opt in list_opt_with_optimality:
+        # Get the iteration count, optimality, and feasibility.
+        # SNOPT and IPOPT use different parameter names for optimality and feasibility.
+        if opt == "ipopt":
+            optimality_name = "inf_du"
+            feasibility_name = "inf_pr"
+        elif opt == "snopt":
+            optimality_name = "optimality"
+            feasibility_name = "feasibility"
+
+        hist = db[opt].getValues(names=["iter", optimality_name, feasibility_name])
+        axs[0].plot(hist["iter"], hist[optimality_name], "o-", label=opt)
+        axs[1].plot(hist["iter"], hist[feasibility_name], "o-", label=opt)
+
+    axs[0].set_yscale("log")
+    axs[1].set_yscale("log")
+    axs[0].legend()
+    axs[0].set_ylabel("Optimality")
+    axs[0].set_xticklabels([])
+    axs[1].set_ylabel("Feasibility")
+    axs[1].set_xlabel("Iteration")
+    plt.show()
diff --git a/pyoptsparse/__init__.py b/pyoptsparse/__init__.py
index 184c7b20..1eff829b 100644
--- a/pyoptsparse/__init__.py
+++ b/pyoptsparse/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "2.14.3"
+__version__ = "2.14.4"
 
 from .pyOpt_history import History
 from .pyOpt_variable import Variable
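The plotting change above reads the stored quantities back through `History.getValues`. The same data can be inspected without the plotting script; a minimal sketch, assuming a history file from an earlier IPOPT run ("opt.hst" is a placeholder name):

```python
from pyoptsparse import History

hist = History("opt.hst", flag="r")
data = hist.getValues(names=["iter", "inf_pr", "inf_du"], major=True)
print(data["inf_du"])  # dual infeasibility (the optimality measure) at each major iteration
```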
diff --git a/pyoptsparse/pyIPOPT/pyIPOPT.py b/pyoptsparse/pyIPOPT/pyIPOPT.py
index 47e3398c..00879d97 100644
--- a/pyoptsparse/pyIPOPT/pyIPOPT.py
+++ b/pyoptsparse/pyIPOPT/pyIPOPT.py
@@ -47,6 +47,9 @@ def __init__(self, raiseError=True, options={}):
         # IPOPT needs Jacobians in coo format
         self.jacType = "coo"
 
+        # List of pyIPOPT-specific options. We remove these from the options so they don't get passed to cyipopt.
+        self.pythonOptions = ["save_major_iteration_variables"]
+
     @staticmethod
     def _getInforms():
         informs = {
@@ -81,6 +84,7 @@ def _getDefaultOptions():
             "print_user_options": [str, "yes"],
             "output_file": [str, "IPOPT.out"],
             "linear_solver": [str, "mumps"],
+            "save_major_iteration_variables": [list, []],
         }
         return defOpts
 
@@ -203,7 +207,7 @@ def __call__(
                 jac["coo"][ICOL].copy().astype("int_"),
             )
 
-        class CyIPOPTProblem:
+        class CyIPOPTProblem(cyipopt.Problem):
             # Define the 4 call back functions that ipopt needs:
             def objective(_, x):
                 fobj, fail = self._masterFunc(x, ["fobj"])
@@ -242,7 +246,48 @@ def jacobianstructure(_):
 
             # Define intermediate callback. If this method returns false,
             # Ipopt will terminate with the User_Requested_Stop status.
-            def intermediate(_, *args, **kwargs):
+            # It also saves the iteration info in the history file. This callback is called at every "major" iteration, but not during line search iterations.
+            # fmt: off
+            def intermediate(self_cyipopt, alg_mod, iter_count, obj_value, inf_pr, inf_du, mu, d_norm, regularization_size, alpha_du, alpha_pr, ls_trials):
+                # fmt: on
+                if self.storeHistory:
+                    iterDict = {
+                        "isMajor": True,
+                        "inf_pr": inf_pr,
+                        "inf_du": inf_du,
+                        "mu": mu,
+                        "alpha_pr": alpha_pr,
+                        "alpha_du": alpha_du,
+                    }
+                    # optional parameters
+                    for saveVar in self.getOption("save_major_iteration_variables"):
+                        if saveVar == "alg_mod":
+                            iterDict[saveVar] = alg_mod
+                        elif saveVar == "d_norm":
+                            iterDict[saveVar] = d_norm
+                        elif saveVar == "regularization_size":
+                            iterDict[saveVar] = regularization_size
+                        elif saveVar == "ls_trials":
+                            iterDict[saveVar] = ls_trials
+                        elif saveVar in ["g_violation", "grad_lag_x"]:
+                            iterDict[saveVar] = self_cyipopt.get_current_violations()[saveVar]
+                        else:
+                            # IPOPT doesn't handle Python errors well, so print an error message and send a termination signal to IPOPT
+                            print(
+                                f"ERROR: Received unknown IPOPT save variable `{saveVar}`. "
+                                + "Please see the 'save_major_iteration_variables' option in the pyOptSparse "
+                                + "documentation under 'IPOPT'."
+                            )
+                            print("Terminating IPOPT...")
+                            return False
+
+                    # Find the pyoptsparse call counters for the objective and constraint evaluations at the current x.
+                    # IPOPT calls the objective and constraints separately, so we find two call counters and write iterDict to both.
+                    call_counter_1 = self.hist._searchCallCounter(self.cache["x"])
+                    call_counter_2 = None
+                    if call_counter_1 is not None:  # guard against x not being found in the history
+                        call_counter_2 = self.hist._searchCallCounter(self.cache["x"], last=call_counter_1 - 1)
+
+                    for call_counter in [call_counter_2, call_counter_1]:
+                        if call_counter is not None:
+                            self.hist.write(call_counter, iterDict)
+
                 if self.userRequestedTermination is True:
                     return False
                 else:
@@ -250,10 +295,9 @@
 
         timeA = time.time()
 
-        nlp = cyipopt.Problem(
+        nlp = CyIPOPTProblem(
             n=len(xs),
             m=ncon,
-            problem_obj=CyIPOPTProblem(),
             lb=blx,
             ub=bux,
             cl=blc,
@@ -296,4 +340,7 @@ def _set_ipopt_options(self, nlp):
         # ---------------------------------------------
         for name, value in self.options.items():
+            # skip the pyIPOPT-specific options
+            if name in self.pythonOptions:
+                continue
             nlp.add_option(name, value)
diff --git a/pyoptsparse/pyOpt_history.py b/pyoptsparse/pyOpt_history.py
index 8ae163c5..8f82f236 100644
--- a/pyoptsparse/pyOpt_history.py
+++ b/pyoptsparse/pyOpt_history.py
@@ -152,7 +152,7 @@ def read(self, key):
         except KeyError:
             return None
 
-    def _searchCallCounter(self, x):
+    def _searchCallCounter(self, x, last=None):
         """
         Searches through existing callCounters, and finds the one
         corresponding to an evaluation at the design vector `x`.
@@ -162,6 +162,8 @@
         ----------
         x : ndarray
             The unscaled DV as a single array.
+        last : int, optional
+            The last callCounter to search from. If not provided, the last callCounter in the database is used.
 
         Returns
         -------
@@ -173,7 +175,8 @@
         -----
         The tolerance used for this is the value `numpy.finfo(numpy.float64).eps`.
         """
-        last = int(self.db["last"])
+        if last is None:
+            last = int(self.db["last"])
         callCounter = None
         for i in range(last, 0, -1):
             key = str(i)
diff --git a/pyoptsparse/pyOpt_optimizer.py b/pyoptsparse/pyOpt_optimizer.py
index 202d44b5..8ea6e1ff 100644
--- a/pyoptsparse/pyOpt_optimizer.py
+++ b/pyoptsparse/pyOpt_optimizer.py
@@ -578,9 +578,9 @@ def _masterFunc2(self, x, evaluate, writeHist=True):
             # timing
             hist["time"] = time.time() - self.startTime
 
-            # Save information about major iteration counting (only matters for SNOPT).
-            if self.name == "SNOPT":
-                hist["isMajor"] = False  # this will be updated in _snstop if it is major
+            # Save information about major iteration counting (only matters for SNOPT and IPOPT).
+            if self.name in ["SNOPT", "IPOPT"]:
+                hist["isMajor"] = False  # this will be updated in _snstop or cyipopt's `intermediate` if it is major
             else:
                 hist["isMajor"] = True  # for other optimizers we assume everything's major
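The `isMajor` flag written here (and flipped to `True` by `_snstop` or cyipopt's `intermediate`) is what lets the History API filter out line-search evaluations after the fact. A minimal sketch of the distinction, assuming a history file from an SNOPT or IPOPT run ("opt.hst" is a placeholder):

```python
from pyoptsparse import History

hist = History("opt.hst")
majors = hist.getValues(names=["obj"], major=True)  # only evaluations flagged isMajor
allcalls = hist.getValues(names=["obj"], major=False)  # every stored function evaluation
print(majors["obj"].shape, allcalls["obj"].shape)  # the first is never longer than the second
```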
diff --git a/tests/test_hs015.py b/tests/test_hs015.py
index 84c98d82..4a4e3c8e 100644
--- a/tests/test_hs015.py
+++ b/tests/test_hs015.py
@@ -130,7 +130,8 @@ def test_optimization(self, optName):
     def test_ipopt(self):
         self.optName = "IPOPT"
         self.setup_optProb()
-        optOptions = self.optOptions.pop(self.optName, None)
+        store_vars = ["alg_mod", "d_norm", "regularization_size", "ls_trials", "g_violation", "grad_lag_x"]
+        optOptions = {"save_major_iteration_variables": store_vars}
         sol = self.optimize(optOptions=optOptions, storeHistory=True)
         # Check Solution
         self.assert_solution_allclose(sol, self.tol[self.optName])
@@ -144,6 +145,16 @@
         data_last = hist.read(hist.read("last"))
         self.assertGreater(data_last["iter"], 0)
 
+        # Check entries in the iteration data
+        data = hist.getValues(callCounters=["last"])
+        default_store_vars = ["inf_pr", "inf_du", "mu", "alpha_pr", "alpha_du"]
+        for var in default_store_vars + store_vars:
+            self.assertIn(var, data.keys())
+        self.assertEqual(data["inf_pr"].shape, (1, 1))
+        self.assertEqual(data["inf_du"].shape, (1, 1))
+        self.assertEqual(data["g_violation"].shape, (1, 2))
+        self.assertEqual(data["grad_lag_x"].shape, (1, 2))
+
         # Make sure there is no duplication in objective history
         data = hist.getValues(names=["obj"])
         objhis_len = data["obj"].shape[0]
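To spell out the shape assertions in the test: HS15 has two design variables and two constraints, so with `callCounters=["last"]` each returned array has a single row, scalar quantities come back as `(1, 1)` arrays, and the vector quantities as `(1, 2)` arrays. A hypothetical inspection after such a run (the history file name is a placeholder):

```python
from pyoptsparse import History

hist = History("hs015_ipopt.hst")
data = hist.getValues(callCounters=["last"])
print(data["inf_pr"].shape)  # (1, 1): one scalar per requested call counter
print(data["g_violation"].shape)  # (1, 2): one violation entry per constraint
print(data["grad_lag_x"].shape)  # (1, 2): one entry per design variable
```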