Skip to content
Open
Show file tree
Hide file tree
Changes from 12 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion doc/api/history.rst
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,8 @@ In this case, the history file would have the following layout::
The main optimization history is indexed via call counters, in this example ``0`` and ``1``.
Note that they do not match the major/minor iterations of a given optimizer, since gradient evaluations are stored separately from the function evaluations.

For SNOPT, a number of other values can be requested and stored in each major iteration, such as the feasibility and optimality from the SNOPT print out file.
For SNOPT and IPOPT, a number of other values can be requested and stored in each major iteration, such as the feasibility and optimality.
See SNOPT and IPOPT documentation pages for more details.


API
Expand Down
19 changes: 19 additions & 0 deletions doc/optimizers/IPOPT_options.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -10,3 +10,22 @@ linear_solver:
desc: The linear solver used.
sb:
desc: This is an undocumented option which suppresses the IPOPT header from being printed to screen every time.
save_major_iteration_variables:
desc: |
This option is unique to the Python wrapper, and takes a list of values which can be saved at each major iteration to the History file.
The possible values are:

- ``alg_mod``: algorithm mode (0 for regular, 1 for restoration)
- ``d_norm``: infinity norm of the primal step
- ``regularization_size``: regularization term for the Hessian of the Lagrangian
- ``ls_trials``: number of backtracking line search iterations
- ``g_violation``: vector of constraint violations
- ``grad_lag_x``: gradient of Lagrangian

In addition, a set of default parameters is saved to the history file and cannot be changed. These are:

- ``feasibility``: primal infeasibility (called ``inf_pr`` in IPOPT)
- ``optimality``: dual infeasibility (called ``inf_du`` in IPOPT) which is an optimality measure
- ``mu``: barrier parameter
- ``step_primal``: step size for primal variables (called ``alpha_pr`` in IPOPT)
- ``step_dual``: step size for dual variables (called ``alpha_du`` in IPOPT)
21 changes: 21 additions & 0 deletions examples/hs015VarPlot.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,4 +74,25 @@
plt.xlabel("x1")
plt.ylabel("x2")
plt.title("Simple optimizer comparison")

# Plot optimality and feasibility history for SNOPT and IPOPT
list_opt_with_optimality = [opt for opt in db.keys() if opt in ["ipopt", "snopt"]]
if len(list_opt_with_optimality) > 0:
fig, axs = plt.subplots(2, 1)

for opt in list_opt_with_optimality:
# get iteration count, optimality, and feasibility
hist = db[opt].getValues(names=["iter", "optimality", "feasibility"])

axs[0].plot(hist["iter"], hist["optimality"], "o-", label=opt)
axs[1].plot(hist["iter"], hist["feasibility"], "o-", label=opt)

axs[0].set_yscale("log")
axs[1].set_yscale("log")
axs[0].legend()
axs[0].set_ylabel("Optimality")
axs[0].set_xticklabels([])
axs[1].set_ylabel("Feasibility")
axs[1].set_xlabel("Iteration")

plt.show()
66 changes: 62 additions & 4 deletions pyoptsparse/pyIPOPT/pyIPOPT.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,17 @@ def __init__(self, raiseError=True, options={}):
if cyipopt is None and raiseError:
raise ImportError("Could not import cyipopt")

# IPOPT>=3.14 is required to save g_violation and grad_lag_x. Check IPOPT version.
if "save_major_iteration_variables" in options and (
"g_violation" in options["save_major_iteration_variables"]
or "grad_lag_x" in options["save_major_iteration_variables"]
):
ipopt_ver = cyipopt.IPOPT_VERSION
if ipopt_ver[0] < 3 or ipopt_ver[1] < 14:
raise RuntimeError(
f"IPOPT>=3.14 is required to save `g_violation` and `grad_lag_x`, but you have IPOPT v{ipopt_ver[0]}.{ipopt_ver[1]}."
)

super().__init__(
name,
category,
Expand All @@ -51,6 +62,9 @@ def __init__(self, raiseError=True, options={}):
# IPOPT needs Jacobians in coo format
self.jacType = "coo"

# List of pyIPOPT-specific options. We remove these from the list of options so these don't go into cyipopt.
self.pythonOptions = ["save_major_iteration_variables"]

@staticmethod
def _getInforms():
informs = {
Expand Down Expand Up @@ -85,6 +99,7 @@ def _getDefaultOptions():
"print_user_options": [str, "yes"],
"output_file": [str, "IPOPT.out"],
"linear_solver": [str, "mumps"],
"save_major_iteration_variables": [list, []],
}
return defOpts

Expand Down Expand Up @@ -207,7 +222,7 @@ def __call__(
jac["coo"][ICOL].copy().astype("int_"),
)

class CyIPOPTProblem:
class CyIPOPTProblem(cyipopt.Problem):
# Define the 4 call back functions that ipopt needs:
def objective(_, x):
fobj, fail = self._masterFunc(x, ["fobj"])
Expand Down Expand Up @@ -246,18 +261,58 @@ def jacobianstructure(_):

# Define intermediate callback. If this method returns false,
# Ipopt will terminate with the User_Requested_Stop status.
def intermediate(_, *args, **kwargs):
# Also save iteration info in the history file. This callback is called every "major" iteration but not in line search iterations.
# fmt: off
def intermediate(self_cyipopt, alg_mod, iter_count, obj_value, inf_pr, inf_du, mu, d_norm, regularization_size, alpha_du, alpha_pr, ls_trials):
# fmt: on
if self.storeHistory:
iterDict = {
"isMajor": True,
"feasibility": inf_pr,
"optimality": inf_du,
"mu": mu,
"step_primal": alpha_pr,
"step_dual": alpha_du,
}
# optional parameters
for saveVar in self.getOption("save_major_iteration_variables"):
if saveVar == "alg_mod":
iterDict[saveVar] = alg_mod
elif saveVar == "d_norm":
iterDict[saveVar] = d_norm
elif saveVar == "regularization_size":
iterDict[saveVar] = regularization_size
elif saveVar == "ls_trials":
iterDict[saveVar] = ls_trials
elif saveVar in ["g_violation", "grad_lag_x"]:
iterDict[saveVar] = self_cyipopt.get_current_violations()[saveVar]
else:
# IPOPT doesn't handle Python error well, so print an error message and send termination signal to IPOPT
print(f"ERROR: Received unknown IPOPT save variable `{saveVar}`. "
+ "Please see 'save_major_iteration_variables' option in the pyOptSparse "
+ "documentation under 'IPOPT'.")
print("Terminating IPOPT...")
return False

# Find pyoptsparse call counters for objective and constraints calls at current x.
# IPOPT calls objective and constraints separately, so we find two call counters and append iter_dict to both counters.
call_counter_1 = self.hist._searchCallCounter(self.cache["x"])
call_counter_2 = self.hist._searchCallCounter(self.cache["x"], last=call_counter_1 - 1)

for call_counter in [call_counter_2, call_counter_1]:
if call_counter is not None:
self.hist.write(call_counter, iterDict)

if self.userRequestedTermination is True:
return False
else:
return True

timeA = time.time()

nlp = cyipopt.Problem(
nlp = CyIPOPTProblem(
n=len(xs),
m=ncon,
problem_obj=CyIPOPTProblem(),
lb=blx,
ub=bux,
cl=blc,
Expand Down Expand Up @@ -300,4 +355,7 @@ def _set_ipopt_options(self, nlp):
# ---------------------------------------------

for name, value in self.options.items():
# skip pyIPOPT-specific options
if name in self.pythonOptions:
continue
nlp.add_option(name, value)
7 changes: 5 additions & 2 deletions pyoptsparse/pyOpt_history.py
Original file line number Diff line number Diff line change
Expand Up @@ -152,7 +152,7 @@ def read(self, key):
except KeyError:
return None

def _searchCallCounter(self, x):
def _searchCallCounter(self, x, last=None):
"""
Searches through existing callCounters, and finds the one corresponding
to an evaluation at the design vector `x`.
Expand All @@ -162,6 +162,8 @@ def _searchCallCounter(self, x):
----------
x : ndarray
The unscaled DV as a single array.
last : int, optional
The last callCounter to search from. If not provided, use the last callCounter in db.

Returns
-------
Expand All @@ -173,7 +175,8 @@ def _searchCallCounter(self, x):
-----
The tolerance used for this is the value `numpy.finfo(numpy.float64).eps`.
"""
last = int(self.db["last"])
if last is None:
last = int(self.db["last"])
callCounter = None
for i in range(last, 0, -1):
key = str(i)
Expand Down
6 changes: 3 additions & 3 deletions pyoptsparse/pyOpt_optimizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -578,9 +578,9 @@ def _masterFunc2(self, x, evaluate, writeHist=True):
# timing
hist["time"] = time.time() - self.startTime

# Save information about major iteration counting (only matters for SNOPT).
if self.name == "SNOPT":
hist["isMajor"] = False # this will be updated in _snstop if it is major
# Save information about major iteration counting (only matters for SNOPT and IPOPT).
if self.name in ["SNOPT", "IPOPT"]:
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Just a curiosity for the moment.. do you think intermediate could be used in the same way _snstop is used? #420 is stalled now but I wonder if we could extend similar features to IPOPT.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, I think so

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yep, eventually we can reorganize and refactor that part of the logic, it's a fairly common feature for optimizers to provide a per-iteration callback functionality.

hist["isMajor"] = False # this will be updated in _snstop or cyipopt's `intermediate` if it is major
else:
hist["isMajor"] = True # for other optimizers we assume everything's major

Expand Down
27 changes: 26 additions & 1 deletion tests/test_hs015.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,12 @@
import numpy as np
from parameterized import parameterized

try:
# External modules
import cyipopt
except ImportError:
cyipopt = None

# First party modules
from pyoptsparse import OPT, History, Optimization
from pyoptsparse.testing import OptTest
Expand Down Expand Up @@ -130,7 +136,15 @@ def test_optimization(self, optName):
def test_ipopt(self):
self.optName = "IPOPT"
self.setup_optProb()
optOptions = self.optOptions.pop(self.optName, None)
store_vars = ["alg_mod", "d_norm", "regularization_size", "ls_trials"]
# check IPOPT version and add more variables to save_major_iteration_variables if IPOPT>=3.14
ipopt_314 = False
if cyipopt is not None:
ipopt_ver = cyipopt.IPOPT_VERSION
if ipopt_ver[0] >= 3 and ipopt_ver[1] >= 14:
ipopt_314 = True
store_vars.extend(["g_violation", "grad_lag_x"])
optOptions = {"save_major_iteration_variables": store_vars}
sol = self.optimize(optOptions=optOptions, storeHistory=True)
# Check Solution
self.assert_solution_allclose(sol, self.tol[self.optName])
Expand All @@ -144,6 +158,17 @@ def test_ipopt(self):
data_last = hist.read(hist.read("last"))
self.assertGreater(data_last["iter"], 0)

# Check entries in iteration data
data = hist.getValues(callCounters=["last"])
default_store_vars = ["feasibility", "optimality", "mu", "step_primal", "step_dual"]
for var in default_store_vars + store_vars:
self.assertIn(var, data.keys())
self.assertEqual(data["feasibility"].shape, (1, 1))
self.assertEqual(data["optimality"].shape, (1, 1))
if ipopt_314:
self.assertEqual(data["g_violation"].shape, (1, 2))
self.assertEqual(data["grad_lag_x"].shape, (1, 2))

# Make sure there is no duplication in objective history
data = hist.getValues(names=["obj"])
objhis_len = data["obj"].shape[0]
Expand Down
Loading