ReFRACtor
nlopt_constrained_test.py
from __future__ import print_function
from __future__ import division
from past.utils import old_div
from nose.tools import *
from full_physics import *
import math
import numpy as np  # used for the lower-bound array below
try:
    import nlopt
except ImportError:
    pass # Ignore, these tests are just examples
from nose.plugins.skip import Skip, SkipTest

def nlopt_constrained_test():
    '''This test is an example of using the nlopt package. We ended up
    *not* using this; the solvers didn't work well with our problems. But
    we leave this test in place as an example of how to set this up in case
    we want to return to it at some point.'''
    raise SkipTest
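    # The config and Lua files below point at a developer-specific build area
    # (an OCO-2 sounding test case); adjust these paths to a local build to
    # actually exercise this example.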
    config_file = "/home/smyth/Local/Level2PythonBuild/build/oco2_sounding_1_test/oco_oco2_sounding_1_test.config"
    lua_config = "/home/smyth/Local/Level2PythonBuild/build/oco2_sounding_1_test/config_diff_solv.lua"
    l2run = L2Run.create_from_existing_run(config_file, lua_config=lua_config)
    opt_problem = l2run.lua_config.opt_problem
    def func(x, grad):
        print(x)
        # Note gradient_x and cost_x are smart, so if called with the
        # same x they don't calculate twice.
        if grad.size > 0:
            g = opt_problem.gradient_x(x)
            grad[:] = g
        return opt_problem.cost_x(x)
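    # func follows nlopt's Python objective convention: it receives x and a
    # grad array, fills grad in place when a gradient-based algorithm supplies
    # a non-empty grad, and returns the scalar cost.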
    opt = nlopt.opt(nlopt.LD_MMA, opt_problem.parameter_size)
    opt.set_min_objective(func)
    low_x = np.zeros(opt_problem.parameter_size)
    # Not sure about these; we'll want to look at these ranges.
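    # The pattern below: entries set to 0 are kept non-negative, entries set
    # to 1e-9 are kept strictly positive, and entries set to -np.inf are left
    # unconstrained from below.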
    low_x[0:20] = 0
    low_x[20] = 0
    low_x[21] = 0
    low_x[22] = -np.inf
    low_x[23] = 1e-9
    low_x[24] = 0
    low_x[25] = 0
    low_x[26] = 1e-9
    low_x[27] = 0
    low_x[28] = 0
    low_x[29] = 1e-9
    low_x[30] = 0
    low_x[31] = 0
    low_x[32] = 1e-9
    low_x[33] = 0
    low_x[34] = 0
    low_x[35] = 0
    low_x[36] = -np.inf
    low_x[37] = 0
    low_x[38] = -np.inf
    low_x[39] = 0
    low_x[40] = -np.inf
    low_x[41] = 0
    low_x[42] = -np.inf
    low_x[43] = 0
    low_x[44] = -np.inf
    low_x[45] = 0
    low_x[46] = -np.inf
    low_x[47] = -np.inf
    low_x[48] = -np.inf
    opt.set_lower_bounds(low_x)
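    # The x vector below appears to be a sample set of state-vector values
    # kept here for reference; it is not passed to the optimizer, since
    # opt.optimize() below starts from opt_problem.parameters.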
    x = [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
          0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
          0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
          0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
          0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
          1.00000000e-01, 1.02315449e+05, -9.00000000e-01, 9.14938609e-01,
          0.00000000e+00, 9.50000000e-01, 1.00000000e-09, 0.00000000e+00,
          9.50000000e-01, 1.00000000e-09, 0.00000000e+00, 0.00000000e+00,
          1.00000000e-09, 0.00000000e+00, 1.00000000e+00, 0.00000000e+00,
          -9.00000000e-01, 1.13937281e+00, 9.00000000e-01, 1.01898162e+00,
          -9.00000000e-01, 1.65769130e+00, -8.99982524e-01, 6.90711580e-01,
          -8.99963735e-01, 2.94325091e+00, 9.00046938e-01, -2.06407296e-14,
          1.80000000e-03]
    opt.optimize(opt_problem.parameters)

def nlopt_test():
    '''This is from the NLopt tutorial.'''
    raise SkipTest
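    # The tutorial problem: minimize sqrt(x[1]) subject to
    # x[1] >= (a*x[0] + b)**3 for (a, b) = (2, 0) and (-1, 1), with x[1] >= 0.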
    def myfunc(x, grad):
        if grad.size > 0:
            grad[0] = 0.0
            grad[1] = old_div(0.5, math.sqrt(x[1]))
        return math.sqrt(x[1])

    def myconstraint(x, grad, a, b):
        if grad.size > 0:
            grad[0] = 3 * a * (a*x[0] + b)**2
            grad[1] = -1.0
        return (a*x[0] + b)**3 - x[1]

    opt = nlopt.opt(nlopt.LD_MMA, 2)
    opt.set_lower_bounds([-float('inf'), 0])
    opt.set_min_objective(myfunc)
    opt.add_inequality_constraint(lambda x, grad: myconstraint(x, grad, 2, 0), 1e-8)
    opt.add_inequality_constraint(lambda x, grad: myconstraint(x, grad, -1, 1), 1e-8)
    opt.set_xtol_rel(1e-4)
    x = opt.optimize([1.234, 5.678])
    minf = opt.last_optimum_value()
    print("optimum at ", x[0], x[1])
    print("minimum value = ", minf)
    print("result code = ", opt.last_optimize_result())
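    # For reference, both constraints are active at the analytic optimum:
    # x = (1/3, 8/27), giving a minimum value of sqrt(8/27), about 0.5443.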

Copyright © 2017, California Institute of Technology.
ALL RIGHTS RESERVED.
U.S. Government Sponsorship acknowledged.