% ref_numerical_optimal_control.bib
@article{acikmeseGFOLDRealTimeImplementable2012,
title = {G-{{FOLD}}: {{A Real-Time Implementable Fuel Optimal Large Divert Guidance Algorithm}} for {{Planetary Pinpoint Landing}}},
shorttitle = {G-{{FOLD}}},
author = {A{\c c}{\i}kme{\c s}e, Beh{\c c}et and Casoliva, Jordi and Carson, John and Blackmore, Lars},
year = {2012},
month = jun,
journal = {LPI Contributions},
pages = {4193},
abstract = {We describe an optimal large divert powered descent guidance algorithm (G-FOLD) developed at JPL that autonomously computes the fuel optimal path that takes the lander to a given surface target without violating any mission constraints.}
}
@article{andersonIndirectNumericalMethod1972,
title = {An Indirect Numerical Method for the Solution of a Class of Optimal Control Problems with Singular Arcs},
author = {Anderson, G.},
year = {1972},
month = jun,
journal = {IEEE Transactions on Automatic Control},
volume = {17},
number = {3},
pages = {363--365},
issn = {0018-9286},
doi = {10.1109/TAC.1972.1099989},
abstract = {An indirect numerical method is presented that solves a class of optimal control problems that have a singular arc occurring after an initial nonsingular arc. This method iterates on the subset of initial costate variables that enforce the junction conditions for switching to a singular arc, and the time of switching off of the singular arc to a final nonsingular arc, to reduce a terminal error function of the final conditions to zero. This results in the solution to the two-point boundary-value problem obtained using the minimum principle and some necessary conditions for singular arcs. The main advantage of this method is that the exact solution to the two-point boundary-value problem is obtained. The main disadvantage is that the sequence of controls for the problem must be known to apply this method. Two illustrative examples are presented.}
}
@article{anderssonCasADiSoftwareFramework2019,
title = {{{CasADi}}: A Software Framework for Nonlinear Optimization and Optimal Control},
shorttitle = {{{CasADi}}},
author = {Andersson, Joel A. E. and Gillis, Joris and Horn, Greg and Rawlings, James B. and Diehl, Moritz},
year = {2019},
month = mar,
journal = {Mathematical Programming Computation},
volume = {11},
number = {1},
pages = {1--36},
issn = {1867-2957},
doi = {10.1007/s12532-018-0139-4},
url = {https://doi.org/10.1007/s12532-018-0139-4},
urldate = {2021-04-08},
abstract = {We present CasADi, an open-source software framework for numerical optimization. CasADi is a general-purpose tool that can be used to model and solve optimization problems with a large degree of flexibility, larger than what is associated with popular algebraic modeling languages such as AMPL, GAMS, JuMP or Pyomo. Of special interest are problems constrained by differential equations, i.e. optimal control problems. CasADi is written in self-contained C++, but is most conveniently used via full-featured interfaces to Python, MATLAB or Octave. Since its inception in late 2009, it has been used successfully for academic teaching as well as in applications from multiple fields, including process control, robotics and aerospace. This article gives an up-to-date and accessible introduction to the CasADi framework, which has undergone numerous design improvements over the last 7~years.},
langid = {english}
}
@book{bettsPracticalMethodsOptimal2009,
title = {Practical {{Methods}} for {{Optimal Control}} and {{Estimation Using Nonlinear Programming}}},
author = {Betts, John T.},
year = {2009},
month = nov,
edition = {2},
  publisher = {{Society for Industrial and Applied Mathematics}},
address = {Philadelphia},
abstract = {This second edition of the popular text by John Betts incorporates lots of new material while maintaining the concise and focused presentation of the original edition. The book describes how sparse optimization methods can be combined with discretization techniques for differential-algebraic equations and used to solve optimal control and estimation problems. The interaction between optimization and integration is emphasized throughout the book. Practical Methods for Optimal Control and Estimation Using Nonlinear Programming, Second Edition includes presentation of relevant background in nonlinear programming methods that exploit sparse matrix technology, along with description of discretization techniques for solving differential-algebraic equations and an extensive collection of example problems that demonstrate the methods. The SOCS software referenced within the book can be licensed from Boeing by readers interested in receiving the code and training materials for further investigation. Audience: This book will appeal to users of optimal control working in fields such as the aerospace industry; chemical process control; mathematical biology; robotics and multibody simulation; and electrical, mechanical, and structural engineering. It can also be a primary or supplemental text for graduate courses on optimal control methods. Contents: Preface; Chapter 1: Introduction to Nonlinear Programming; Chapter 2: Large, Sparse Nonlinear Programming; Chapter 3: Optimal Control Preliminaries; Chapter 4: The Optimal Control Problem; Chapter 5: Parameter Estimation; Chapter 6: Optimal Control Examples; Chapter 7: Advanced Applications; Chapter 8: Epilogue; Appendix: Software; Bibliography; Index},
isbn = {978-0-89871-688-7},
  langid = {english}
}
@book{bettsPracticalMethodsOptimal2020,
title = {Practical {{Methods}} for {{Optimal Control Using Nonlinear Programming}}},
author = {Betts, John T.},
year = {2020},
month = jan,
series = {Advances in {{Design}} and {{Control}}},
edition = {3},
publisher = {{Society for Industrial and Applied Mathematics}},
doi = {10.1137/1.9781611976199},
url = {https://epubs.siam.org/doi/book/10.1137/1.9781611976199},
urldate = {2021-04-15},
abstract = {Solving an optimal control problem is not easy. Pieces of the puzzle are found scattered throughout many different disciplines. Furthermore, the focus of this book is on practical methods, that is, methods that I have found actually work! In fact, everything described in this book has been implemented in production software and used to solve real optimal control problems. Although the reader should be proficient in advanced mathematics, no theorems are presented.},
isbn = {978-1-61197-618-2}
}
@article{bettsSurveyNumericalMethods1998,
title = {Survey of {{Numerical Methods}} for {{Trajectory Optimization}}},
author = {Betts, John T.},
year = {1998},
journal = {Journal of Guidance, Control, and Dynamics},
volume = {21},
number = {2},
pages = {193--207},
issn = {0731-5090},
doi = {10.2514/2.4231},
url = {http://dx.doi.org/10.2514/2.4231},
urldate = {2016-04-13}
}
@techreport{bieglerSolutionDynamicOptimization1983,
  title = {Solution of {{Dynamic Optimization Problems}} by {{Successive Quadratic Programming}} and {{Orthogonal Collocation}}},
author = {Biegler, L. T.},
year = {1983},
month = dec,
  copyright = {Approved for public release},
  langid = {english}
}
@misc{byczkowskiMethodSolvingStatePath2023,
title = {Method for {{Solving State-Path Constrained Optimal Control Problems Using Adaptive Radau Collocation}}},
author = {Byczkowski, Cale A. and Rao, Anil V.},
year = {2023},
month = apr,
number = {arXiv:2304.06130},
eprint = {2304.06130},
primaryclass = {math},
publisher = {arXiv},
url = {http://arxiv.org/abs/2304.06130},
urldate = {2023-04-18},
abstract = {A new method is developed for accurately approximating the solution to state-variable inequality path constrained optimal control problems using a multiple-domain adaptive Legendre-Gauss-Radau collocation method. The method consists of the following parts. First, a structure detection method is developed to estimate switch times in the activation and deactivation of state-variable inequality path constraints. Second, using the detected structure, the domain is partitioned into multiple-domains where each domain corresponds to either a constrained or an unconstrained segment. Furthermore, additional decision variables are introduced in the multiple-domain formulation, where these additional decision variables represent the switch times of the detected active state-variable inequality path constraints. Within a constrained domain, the path constraint is differentiated with respect to the independent variable until the control appears explicitly, and this derivative is set to zero along the constrained arc while all preceding derivatives are set to zero at the start of the constrained arc. The time derivatives of the active state-variable inequality path constraints are computed using automatic differentiation and the properties of the chain rule. The method is demonstrated on two problems, the first being a benchmark optimal control problem which has a known analytical solution and the second being a challenging problem from the field of aerospace engineering in which there is no known analytical solution. When compared against previously developed adaptive Legendre-Gauss-Radau methods, the results show that the method developed in this paper is capable of computing accurate solutions to problems whose solution contain active state-variable inequality path constraints.},
archiveprefix = {arxiv}
}
@article{conwaySurveyMethodsAvailable2011,
title = {A {{Survey}} of {{Methods Available}} for the {{Numerical Optimization}} of {{Continuous Dynamic Systems}}},
author = {Conway, Bruce A.},
year = {2011},
month = sep,
journal = {Journal of Optimization Theory and Applications},
volume = {152},
number = {2},
pages = {271--306},
issn = {0022-3239, 1573-2878},
doi = {10.1007/s10957-011-9918-z},
url = {http://link.springer.com/article/10.1007/s10957-011-9918-z},
urldate = {2016-06-09},
abstract = {There has been significant progress in the development of numerical methods for the determination of optimal trajectories for continuous dynamic systems, especially in the last 20 years. In the 1980s, the principal contribution was new methods for discretizing the continuous system and converting the optimization problem into a nonlinear programming problem. This has been a successful approach that has yielded optimal trajectories for very sophisticated problems. In the last 15--20 years, researchers have applied a qualitatively different approach, using evolutionary algorithms or metaheuristics, to solve similar parameter optimization problems. Evolutionary algorithms use the principle of ``survival of the fittest'' applied to a population of individuals representing candidate solutions for the optimal trajectories. Metaheuristics optimize by iteratively acting to improve candidate solutions, often using stochastic methods. In this paper, the advantages and disadvantages of these recently developed methods are described and an attempt is made to answer the question of what is now the best extant numerical solution method.},
  langid = {english}
}
@article{febboNLOptControlModelingLanguage2020,
title = {{{NLOptControl}}: {{A}} Modeling Language for Solving Optimal Control Problems},
shorttitle = {{{NLOptControl}}},
author = {Febbo, Huckleberry and Jayakumar, Paramsothy and Stein, Jeffrey L. and Ersal, Tulga},
year = {2020},
month = apr,
journal = {arXiv:2003.00142 [cs, math]},
eprint = {2003.00142},
primaryclass = {cs, math},
url = {http://arxiv.org/abs/2003.00142},
urldate = {2021-04-07},
abstract = {Current direct-collocation-based optimal control software is either easy to use or fast, but not both. This is a major limitation for users that are trying to formulate complex optimal control problems (OCPs) for use in on-line applications. This paper introduces NLOptControl, an open-source modeling language that allows users to both easily formulate and quickly solve nonlinear OCPs using direct-collocation methods. To achieve these attributes, NLOptControl (1) is written in an efficient, dynamically-typed computing language called Julia, (2) extends an optimization modeling language called JuMP to provide a natural algebraic syntax for modeling nonlinear OCPs; and (3) uses reverse automatic differentiation with the acrylic-coloring method to exploit sparsity in the Hessian matrix. This work explores the novel design features of NLOptControl and compares its syntax and speed to those of PROPT. The syntax comparisons shows that NLOptControl models OCPs more concisely than PROPT. The speeds of various collocation methods within PROPT and NLOptControl are benchmarked over a range of collocation points using performance profiles; overall, NLOptControl's single, two, and four interval pseudospectral methods are roughly \$14\$, \$26\$, and \$36\$ times faster than PROPT's, respectively. NLOptControl is well-suited to improve existing off-line and on-line control systems and to engender new ones.},
archiveprefix = {arxiv}
}
@inproceedings{filoFunctionSpaceApproach2018,
title = {Function {{Space Approach}} for {{Gradient Descent}} in {{Optimal Control}}},
booktitle = {2018 {{Annual American Control Conference}} ({{ACC}})},
author = {Filo, M. and Bamieh, B.},
year = {2018},
month = jun,
pages = {3447--3453},
doi = {10.23919/ACC.2018.8430794},
abstract = {We develop an iterative numerical method for open-loop optimal control problems based on constrained-gradient descent in function space. The descent algorithm uses a projection of the gradient onto the tangent space of the manifold representing the constraints of the dynamical equation. A key feature of our algorithm is preconditioning of the quadratic portion of the cost functional with a relatively simple procedure. This preconditioning has a simple geometric interpretation, which gives intuitive reasoning for the rapid convergence of the iterations in neighborhoods of local optima. We give two illustrative examples including a continuous stirred-tank chemical reactor, and a bilinear quantum system. The relation of this algorithm to earlier proposed Banach space projection algorithms is outlined.}
}
@article{gargUnifiedFrameworkNumerical2010,
title = {A Unified Framework for the Numerical Solution of Optimal Control Problems Using Pseudospectral Methods},
author = {Garg, Divya and Patterson, Michael and Hager, William W. and Rao, Anil V. and Benson, David A. and Huntington, Geoffrey T.},
year = {2010},
month = nov,
journal = {Automatica},
volume = {46},
number = {11},
pages = {1843--1851},
issn = {0005-1098},
doi = {10.1016/j.automatica.2010.06.048},
url = {http://www.sciencedirect.com/science/article/pii/S0005109810002980},
urldate = {2019-04-11},
abstract = {A unified framework is presented for the numerical solution of optimal control problems using collocation at Legendre--Gauss (LG), Legendre--Gauss--Radau (LGR), and Legendre--Gauss--Lobatto (LGL) points. It is shown that the LG and LGR differentiation matrices are rectangular and full rank whereas the LGL differentiation matrix is square and singular. Consequently, the LG and LGR schemes can be expressed equivalently in either differential or integral form, while the LGL differential and integral forms are not equivalent. Transformations are developed that relate the Lagrange multipliers of the discrete nonlinear programming problem to the costates of the continuous optimal control problem. The LG and LGR discrete costate systems are full rank while the LGL discrete costate system is rank-deficient. The LGL costate approximation is found to have an error that oscillates about the true solution and this error is shown by example to be due to the null space in the LGL discrete costate system. An example is considered to assess the accuracy and features of each collocation scheme.}
}
@phdthesis{gathCAMTOSSoftwareSuite2002,
title = {{{CAMTOS}} - {{A Software Suite Combining Direct}} and {{Indirect Trajectory Optimization Methods}}},
author = {Gath, Peter F},
year = {2002},
url = {https://convexoptimization.com/TOOLS/PeterHistos.pdf},
langid = {english},
school = {Universit{\"a}t Stuttgart}
}
@article{gillSNOPTSQPAlgorithm2005,
title = {{{SNOPT}}: {{An SQP Algorithm}} for {{Large-Scale Constrained Optimization}}},
shorttitle = {{{SNOPT}}},
author = {Gill, Philip E. and Murray, Walter and Saunders, Michael A.},
year = {2005},
month = jan,
journal = {SIAM Review},
volume = {47},
number = {1},
pages = {99--131},
publisher = {{Society for Industrial and Applied Mathematics}},
issn = {0036-1445},
doi = {10.1137/S0036144504446096},
url = {https://epubs.siam.org/doi/abs/10.1137/S0036144504446096},
urldate = {2021-04-08},
abstract = {Sequential quadratic programming (SQP) methods have proved highly effective for solving constrained optimization problems with smooth nonlinear functions in the objective and constraints. Here we consider problems with general inequality constraints (linear and nonlinear). We assume that first derivatives are available and that the constraint gradients are sparse. Second derivatives are assumed to be unavailable or too expensive to calculate. We discuss an SQP algorithm that uses a smooth augmented Lagrangian merit function and makes explicit provision for infeasibility in the original problem and the QP subproblems. The Hessian of the Lagrangian is approximated using a limited-memory quasi-Newton method. SNOPT is a particular implementation that uses a reduced-Hessian semidefinite QP solver (SQOPT) for the QP subproblems. It is designed for problems with many thousands of constraints and variables but is best suited for problems with a moderate number of degrees of freedom (say, up to 2000). Numerical results are given for most of the CUTEr and COPS test collections (about 1020 examples of all sizes up to 40000 constraints and variables, and up to 20000 degrees of freedom).}
}
@misc{grosNumericalOptimalControl2020,
title = {Numerical {{Optimal Control}} (Draft)},
author = {Gros, S{\'e}bastien and Diehl, Moritz},
year = {2020},
month = may,
address = {KU Leuven},
  url = {https://www.syscop.de/teaching/ss2017/numerical-optimal-control}
}
@misc{grosNumericalOptimalControl2022,
title = {Numerical {{Optimal Control}} ({{Draft}})},
  author = {Gros, S{\'e}bastien and Diehl, Moritz},
year = {2022},
month = apr,
address = {Systems Control and Optimization Laboratory IMTEK, Faculty of Engineering, University of Freiburg},
url = {https://www.syscop.de/files/2020ss/NOC/book-NOCSE.pdf},
langid = {english}
}
@article{gurtnerBallDoubleHoop2017,
title = {Ball in Double Hoop: Demonstration Model for Numerical Optimal Control},
shorttitle = {Ball in Double Hoop},
author = {Gurtner, Martin and Zem{\'a}nek, Ji{\v r}{\'i}},
year = {2017},
month = jul,
journal = {IFAC-PapersOnLine},
series = {20th {{IFAC World Congress}}},
volume = {50},
number = {1},
pages = {2379--2384},
issn = {2405-8963},
doi = {10.1016/j.ifacol.2017.08.429},
url = {http://www.sciencedirect.com/science/article/pii/S2405896317307796},
urldate = {2019-12-17},
abstract = {Ball and hoop system is a well-known model for the education of linear control systems. In this paper, we have a look at this system from another perspective and show that it is also suitable for demonstration of more advanced control techniques. In contrast to the standard use, we describe the dynamics of the system at full length; in addition to the mode where the ball rolls on the (outer) hoop we also consider the mode where the ball drops out of the hoop and enters a free-fall mode. Furthermore, we add another (inner) hoop in the center upon which the ball can land from the free-fall mode. This constitutes another mode of the hybrid description of the system. We present two challenging tasks for this model and show how they can be solved by trajectory generation and stabilization. We also describe how such a model can be built and experimentally verify the validity of our approach solving the proposed tasks. **All codes and drawings are available at http://github.com/aa4cc/flying-ball-in-hoop},
langid = {english}
}
@article{hargravesDirectTrajectoryOptimization1987,
title = {Direct Trajectory Optimization Using Nonlinear Programming and Collocation},
author = {Hargraves, C. R. and Paris, S. W.},
year = {1987},
journal = {Journal of Guidance, Control, and Dynamics},
volume = {10},
number = {4},
pages = {338--342},
issn = {0731-5090},
doi = {10.2514/3.20223},
url = {http://dx.doi.org/10.2514/3.20223},
urldate = {2016-04-13}
}
@article{hauserProjectionOperatorApproach2002,
title = {A Projection Operator Approach to the Optimization of Trajectory Functionals},
author = {Hauser, John},
year = {2002},
month = jan,
journal = {IFAC Proceedings Volumes},
series = {15th {{IFAC World Congress}}},
volume = {35},
number = {1},
pages = {377--382},
issn = {1474-6670},
doi = {10.3182/20020721-6-ES-1901.00312},
url = {http://www.sciencedirect.com/science/article/pii/S1474667015387334},
urldate = {2019-02-05},
abstract = {We develop a Newton method for the optimization of trajectory functionals. Through the use of a trajectory tracking nonlinear projection operator, the dynamically constrained optimization problem is converted into an unconstrained problem, making many aspects of the algorithm rather transparent. Examples: first and second order optimality conditions, search direction and step length calculations, update rule---all developed from an unconstrained point of view. Quasi-Newton methods are easily developed as well, allowing straightforward globalization of the Newton method. As all operations are set in an appropriate Banach space, properties such as solution regularity are retained so that implementation decisions (level of discretation, etc.) are based on approximating the solution rather than the problem. Convergence in Banach space is shown to be quadratic as is usual for Newton methods.}
}
@phdthesis{hauslerMissionPlanningMultiple2015,
title = {Mission {{Planning}} for {{Multiple Cooperative Robotic Vehicles}}},
author = {H{\"a}usler, Andreas Johannes},
year = {2015},
  school = {Universidade de Lisboa, Instituto Superior T{\'e}cnico}
}
@inproceedings{howellALTROFastSolver2019,
title = {{{ALTRO}}: {{A Fast Solver}} for {{Constrained Trajectory Optimization}}},
shorttitle = {{{ALTRO}}},
booktitle = {2019 {{IEEE}}/{{RSJ International Conference}} on {{Intelligent Robots}} and {{Systems}} ({{IROS}})},
author = {Howell, T. A. and Jackson, B. E. and Manchester, Z.},
year = {2019},
month = nov,
pages = {7674--7679},
issn = {2153-0866},
doi = {10.1109/IROS40897.2019.8967788},
abstract = {Trajectory optimization is a widely used tool for robot motion planning and control. Existing solvers for these problems either rely on off-the-shelf nonlinear programming solvers that are numerically robust and capable of handling arbitrary constraints, but tend to be slow because they are general purpose; or they use custom numerical methods that take advantage of the problem structure to be fast, but often lack robustness and have limited or no ability to reason about constraints. This paper presents ALTRO (Augmented Lagrangian TRajectory optimizer), a solver for constrained trajectory optimization problems that handles general nonlinear state and input constraints and offers fast convergence and numerical robustness thanks to careful exploitation of problem structure. We demonstrate its performance on a set of benchmark motion-planning problems and offer comparisons to the standard direct collocation method with large-scale sequential quadratic programming and interior-point solvers.}
}
@misc{johnsonNLopt2021,
title = {{{NLopt}}},
author = {Johnson, Steven G.},
year = {2021},
month = apr,
url = {https://github.com/stevengj/nlopt},
urldate = {2021-04-08},
  abstract = {library for nonlinear optimization, wrapping many algorithms for global and local, constrained or unconstrained, optimization}
}
@article{kelleyGradientTheoryOptimal1960,
title = {Gradient {{Theory}} of {{Optimal Flight Paths}}},
author = {Kelley, Henry J.},
year = {1960},
journal = {ARS Journal},
volume = {30},
number = {10},
pages = {947--954},
doi = {10.2514/8.5282},
url = {https://doi.org/10.2514/8.5282},
urldate = {2019-04-11}
}
@article{kellyIntroductionTrajectoryOptimization2017,
title = {An {{Introduction}} to {{Trajectory Optimization}}: {{How}} to {{Do Your Own Direct Collocation}}},
shorttitle = {An {{Introduction}} to {{Trajectory Optimization}}},
author = {Kelly, Matthew},
year = {2017},
month = jan,
journal = {SIAM Review},
volume = {59},
number = {4},
pages = {849--904},
issn = {0036-1445},
doi = {10.1137/16M1062569},
url = {https://epubs.siam.org/doi/abs/10.1137/16M1062569},
urldate = {2020-01-04},
abstract = {This paper is an introductory tutorial for numerical trajectory optimization with a focus on direct collocation methods. These methods are relatively simple to understand and effectively solve a wide variety of trajectory optimization problems. Throughout the paper we illustrate each new set of concepts by working through a sequence of four example problems. We start by using trapezoidal collocation to solve a simple one-dimensional toy problem and work up to using Hermite--Simpson collocation to compute the optimal gait for a bipedal walking robot. Along the way, we cover basic debugging strategies and guidelines for posing well-behaved optimization problems. The paper concludes with a short overview of other methods for trajectory optimization. We also provide an electronic supplement that contains well-documented MATLAB code for all examples and methods presented. Our primary goal is to provide the reader with the resources necessary to understand and successfully implement their own direct collocation methods. (An erratum is attached.)}
}
@article{kellyTranscriptionMethodsTrajectory2017,
title = {Transcription {{Methods}} for {{Trajectory Optimization}}: A Beginners Tutorial},
shorttitle = {Transcription {{Methods}} for {{Trajectory Optimization}}},
author = {Kelly, Matthew P.},
year = {2017},
month = jul,
journal = {arXiv:1707.00284 [math]},
eprint = {1707.00284},
primaryclass = {math},
url = {http://arxiv.org/abs/1707.00284},
urldate = {2021-04-06},
  abstract = {This report is an introduction to transcription methods for trajectory optimization techniques. The first few sections describe the two classes of transcription methods (shooting \& simultaneous) that are used to convert the trajectory optimization problem into a general constrained optimization form. The middle of the report discusses a few extensions to the basic methods, including how to deal with hybrid systems (such as walking robots). The final section goes over a variety of implementation details.},
archiveprefix = {arxiv}
}
@article{malyutaAdvancesTrajectoryOptimization2021,
title = {Advances in Trajectory Optimization for Space Vehicle Control},
author = {Malyuta, Danylo and Yu, Yue and Elango, Purnanand and A{\c c}{\i}kme{\c s}e, Beh{\c c}et},
year = {2021},
month = nov,
journal = {Annual Reviews in Control},
issn = {1367-5788},
doi = {10.1016/j.arcontrol.2021.04.013},
url = {https://www.sciencedirect.com/science/article/pii/S1367578821000377},
urldate = {2021-11-11},
abstract = {Space mission design places a premium on cost and operational efficiency. The search for new science and life beyond Earth calls for spacecraft that can deliver scientific payloads to geologically rich yet hazardous landing sites. At the same time, the last four decades of optimization research have put a suite of powerful optimization tools at the fingertips of the controls engineer. As we enter the new decade, optimization theory, algorithms, and software tooling have reached a critical mass to start seeing serious application in space vehicle guidance and control systems. This survey paper provides a detailed overview of recent advances, successes, and promising directions for optimization-based space vehicle control. The considered applications include planetary landing, rendezvous and proximity operations, small body landing, constrained attitude reorientation, endo-atmospheric flight including ascent and reentry, and orbit transfer and injection. The primary focus is on the last ten years of progress, which have seen a veritable rise in the number of applications using three core technologies: lossless convexification, sequential convex programming, and model predictive control. The reader will come away with a well-rounded understanding of the state-of-the-art in each space vehicle control application, and will be well positioned to tackle important current open problems using convex optimization as a core technology.},
langid = {english}
}
@article{malyutaConvexOptimizationTrajectory2021,
title = {Convex {{Optimization}} for {{Trajectory Generation}}},
author = {Malyuta, Danylo and Reynolds, Taylor P. and Szmuk, Michael and Lew, Thomas and Bonalli, Riccardo and Pavone, Marco and Acikmese, Behcet},
year = {2021},
month = jun,
journal = {arXiv:2106.09125 [cs, eess, math]},
eprint = {2106.09125},
primaryclass = {cs, eess, math},
url = {http://arxiv.org/abs/2106.09125},
urldate = {2021-12-03},
abstract = {Reliable and efficient trajectory generation methods are a fundamental need for autonomous dynamical systems of tomorrow. The goal of this article is to provide a comprehensive tutorial of three major convex optimization-based trajectory generation methods: lossless convexification (LCvx), and two sequential convex programming algorithms known as SCvx and GuSTO. In this article, trajectory generation is the computation of a dynamically feasible state and control signal that satisfies a set of constraints while optimizing key mission objectives. The trajectory generation problem is almost always nonconvex, which typically means that it is not readily amenable to efficient and reliable solution onboard an autonomous vehicle. The three algorithms that we discuss use problem reformulation and a systematic algorithmic strategy to nonetheless solve nonconvex trajectory generation tasks through the use of a convex optimizer. The theoretical guarantees and computational speed offered by convex optimization have made the algorithms popular in both research and industry circles. To date, the list of applications includes rocket landing, spacecraft hypersonic reentry, spacecraft rendezvous and docking, aerial motion planning for fixed-wing and quadrotor vehicles, robot motion planning, and more. Among these applications are high-profile rocket flights conducted by organizations like NASA, Masten Space Systems, SpaceX, and Blue Origin. This article aims to give the reader the tools and understanding necessary to work with each algorithm, and to know what each method can and cannot do. A publicly available source code repository supports the provided numerical examples. By the end of the article, the reader should be ready to use the methods, to extend them, and to contribute to their many exciting modern applications.},
archiveprefix = {arxiv}
}
@article{malyutaConvexOptimizationTrajectory2022,
title = {Convex {{Optimization}} for {{Trajectory Generation}}: {{A Tutorial}} on {{Generating Dynamically Feasible Trajectories Reliably}} and {{Efficiently}}},
shorttitle = {Convex {{Optimization}} for {{Trajectory Generation}}},
author = {Malyuta, Danylo and Reynolds, Taylor P. and Szmuk, Michael and Lew, Thomas and Bonalli, Riccardo and Pavone, Marco and A{\c c}{\i}kme{\c s}e, Beh{\c c}et},
year = {2022},
month = oct,
journal = {IEEE Control Systems Magazine},
volume = {42},
number = {5},
pages = {40--113},
issn = {1941-000X},
doi = {10.1109/MCS.2022.3187542},
abstract = {Reliable and efficient trajectory generation methods are a fundamental need for autonomous dynamical systems. The goal of this article is to provide a comprehensive tutorial of three major convex optimization-based trajectory generation methods: lossless convexification (LCvx) and two sequential convex programming algorithms, successive convexification (SCvx) and guaranteed sequential trajectory optimization (GuSTO). Trajectory generation is defined as the computation of a dynamically feasible state and control signal that satisfies a set of constraints while optimizing key mission objectives. The trajectory generation problem is almost always nonconvex, which typically means that it is difficult to solve efficiently and reliably onboard an autonomous vehicle. The three algorithms that we discuss use problem reformulation and a systematic algorithmic strategy to nonetheless solve nonconvex trajectory generation tasks using a convex optimizer. The theoretical guarantees and computational speed offered by convex optimization have made the algorithms popular in both research and industry circles. The growing list of applications includes rocket landing, spacecraft hypersonic reentry, spacecraft rendezvous and docking, aerial motion planning for fixed-wing and quadrotor vehicles, robot motion planning, and more. Among these applications are high-profile rocket flights conducted by organizations such as NASA, Masten Space Systems, SpaceX, and Blue Origin. This article equips the reader with the tools and understanding necessary to work with each algorithm and know their advantages and limitations. An open source tool called the SCP Toolbox accompanies the article and provides a practical implementation of every numerical example. By the end of the article, the reader will not only be ready to use the lossless convexification and sequential convex programming algorithms, but also to extend them and to contribute to their many exciting modern applications.}
}
@article{mehraGeneralizedGradientMethod1972,
title = {A Generalized Gradient Method for Optimal Control Problems with Inequality Constraints and Singular Arcs},
author = {Mehra, R. and Davis, R.},
year = {1972},
month = feb,
journal = {IEEE Transactions on Automatic Control},
volume = {17},
number = {1},
pages = {69--79},
issn = {0018-9286},
doi = {10.1109/TAC.1972.1099881},
abstract = {The steepest descent methods of Bryson and Ho [1] and Kelly [6] and the conjugate gradient method of Lasdon, Mitter, and Waren [3] use control variables as the independent variables in the search procedure. The inequality constraints are often handled via penalty functions which result in poor convergence. Special difficulties are encountered in handling state variable inequality constraints and singular arcs [1]. This paper shows that these difficulties arise due to the exclusive use of control variables as the independent variables in the search procedure. An algorithm based on the generalized reduced gradient (GRG) algorithm of Abadie and Carpentier [5] and Abadie [7] for nonlinear programming is proposed to solve these problems. The choice of the independent variables in this algorithm is dictated by the constraints on the problem and could result in different combinations of state and control variables as independent variables along different parts of the trajectory. The gradient of the cost function with respect to the independent variables, called the generalized gradient, is calculated by solving a set of equations similar to the Euler-Lagrange equations. The directions of search are determined using gradient projection and the conjugate gradient method. Two numerical examples involving state variable inequality constraints are solved [2]. The method is then applied to two examples containing singular arcs and it is shown that these problems can be handled as regular problems by choosing some of the state variables as the independent variables. The relationship of the method to the reduced gradient method of Wolfe [4] and the generalized reduced method of Abadie [7] for nonlinear programming is shown.}
}
@misc{NLOptControlJl2021,
title = {{{NLOptControl}}.Jl},
year = {2021},
month = apr,
url = {https://github.com/JuliaMPC/NLOptControl.jl},
urldate = {2021-04-08},
  abstract = {Nonlinear control optimization tool.},
howpublished = {Julia MPC}
}
@article{ohUseOrthogonalCollocation1977,
title = {Use of Orthogonal Collocation Method in Optimal Control Problems},
author = {Oh, S. H. and Luus, R.},
year = {1977},
month = nov,
journal = {International Journal of Control},
volume = {26},
number = {5},
pages = {657--673},
issn = {0020-7179},
doi = {10.1080/00207177708922339},
url = {http://dx.doi.org/10.1080/00207177708922339},
urldate = {2016-06-09},
  abstract = {A computational method for optimal control problems is proposed. The method, which is based on the polynomial expansions of the state and adjoint variables, is efficient and can be applied to a variety of optimal control problems. The usefulness of the proposed method is demonstrated with several examples.}
}
@misc{OpenOCL2021,
title = {{{OpenOCL}}},
year = {2021},
month = apr,
url = {https://github.com/OpenOCL/OpenOCL},
urldate = {2021-04-08},
abstract = {Open Optimal Control Library for Matlab. Trajectory Optimization and non-linear Model Predictive Control (MPC) toolbox.},
  copyright = {BSD-3-Clause License},
howpublished = {OpenOCL}
}
@phdthesis{paivaNumericalMethodsOptimal2014,
title = {Numerical {{Methods}} for {{Optimal Control}} and {{Model Predictive Control}}},
author = {Paiva, Lu{\'i}s Tiago de Freixo Ramos},
year = {2014},
url = {https://core.ac.uk/download/pdf/143392316.pdf}
}
@misc{pattersonGeneralPurposeMATLABSoftware,
title = {A {{General-Purpose MATLAB Software}} for {{Solving Multiple-Phase Optimal Control Problems}}},
author = {Patterson, Michael A and Rao, Anil V},
year = {2016},
month = dec,
url = {https://www.gpops2.com/}
}
@article{pattersonGPOPSIIMATLABSoftware2014,
title = {{{GPOPS-II}}: {{A MATLAB Software}} for {{Solving Multiple-Phase Optimal Control Problems Using}} Hp-{{Adaptive Gaussian Quadrature Collocation Methods}} and {{Sparse Nonlinear Programming}}},
shorttitle = {{{GPOPS-II}}},
author = {Patterson, Michael A. and Rao, Anil V.},
year = {2014},
month = oct,
  journal = {ACM Transactions on Mathematical Software},
volume = {41},
number = {1},
pages = {1:1--1:37},
issn = {0098-3500},
doi = {10.1145/2558904},
url = {http://doi.acm.org/10.1145/2558904},
urldate = {2016-06-09},
abstract = {A general-purpose MATLAB software program called GPOPS--II is described for solving multiple-phase optimal control problems using variable-order Gaussian quadrature collocation methods. The software employs a Legendre-Gauss-Radau quadrature orthogonal collocation method where the continuous-time optimal control problem is transcribed to a large sparse nonlinear programming problem (NLP). An adaptive mesh refinement method is implemented that determines the number of mesh intervals and the degree of the approximating polynomial within each mesh interval to achieve a specified accuracy. The software can be interfaced with either quasi-Newton (first derivative) or Newton (second derivative) NLP solvers, and all derivatives required by the NLP solver are approximated using sparse finite-differencing of the optimal control problem functions. The key components of the software are described in detail and the utility of the software is demonstrated on five optimal control problems of varying complexity. The software described in this article provides researchers a useful platform upon which to solve a wide variety of complex constrained optimal control problems.},
}
@misc{PROPT,
title = {{{PROPT}}},
url = {http://tomdyn.com/},
urldate = {2021-04-08}
}
@article{raoSurveyNumericalMethods2009,
title = {A Survey of Numerical Methods for Optimal Control},
author = {Rao, Anil V.},
year = {2009},
journal = {Advances in the Astronautical Sciences},
volume = {135},
number = {1},
pages = {497--528},
url = {http://vdol.mae.ufl.edu/ConferencePublications/trajectorySurveyAAS.pdf},
  urldate = {2016-06-09}
}
@misc{Rockit,
title = {Rockit},
url = {https://gitlab.kuleuven.be/meco-software/rockit},
urldate = {2021-04-08},
abstract = {An Optimal Control Problem abstraction class, built on top of CasADi's Opti},
howpublished = {KU Leuven MECO research team}
}
@article{rosmannTimeoptimalControlDirect2020,
title = {Time-Optimal Control with Direct Collocation and Variable Discretization},
author = {R{\"o}smann, Christoph and Makarow, Artemi and Bertram, Torsten},
year = {2020},
month = may,
journal = {arXiv:2005.12136 [cs, eess, math]},
eprint = {2005.12136},
primaryclass = {cs, eess, math},
url = {http://arxiv.org/abs/2005.12136},
urldate = {2021-04-01},
abstract = {This paper deals with time-optimal control of nonlinear continuous-time systems based on direct collocation. The underlying discretization grid is variable in time, as the time intervals are subject to optimization. This technique differs from approaches that are usually based on a time transformation. Hermite-Simpson collocation is selected as common representative in the field of optimal control and trajectory optimization. Hereby, quadratic splines approximate the system dynamics. Several splines of different order are suitable for the control parameterization. A comparative analysis reveals that increasing the degrees of freedom in control, e.g. quadratic splines, is not suitable for time-optimal control problems due to constraint violation and inherent oscillations. However, choosing constant or linear control splines points out to be very effective. A major advantage is that the implicit solution of the system dynamics is suited for stiff systems and often requires smaller grid sizes in practice.},
archiveprefix = {arxiv}
}
@article{sacconOptimalControlLie2013,
title = {Optimal {{Control}} on {{Lie Groups}}: {{The Projection Operator Approach}}},
shorttitle = {Optimal {{Control}} on {{Lie Groups}}},
author = {Saccon, A. and Hauser, J. and Aguiar, A. P.},
year = {2013},
month = sep,
journal = {IEEE Transactions on Automatic Control},
volume = {58},
number = {9},
pages = {2230--2245},
issn = {0018-9286},
doi = {10.1109/TAC.2013.2258817},
abstract = {Many nonlinear systems of practical interest evolve on Lie groups or on manifolds acted upon by Lie groups. Examples range from aircraft and underwater vehicles to quantum mechanical systems. In this paper, we develop an algorithm for solving continuous-time optimal control problems for systems evolving on (noncompact) Lie groups. This algorithm generalizes the projection operator approach for trajectory optimization originally developed for systems on vector spaces. Notions for generalizing system theoretic tools such as Riccati equations and linear and quadratic system approximations are developed. In this development, the covariant derivative of a map between two manifolds plays a key role in providing a chain rule for the required Lie group computations. An example optimal control problem on SO(3) is provided to highlight implementation details and to demonstrate the effectiveness of the method.}
}
@article{verschuerenAcadosModularOpensource2022,
title = {Acados---a Modular Open-Source Framework for Fast Embedded Optimal Control},
author = {Verschueren, Robin and Frison, Gianluca and Kouzoupis, Dimitris and Frey, Jonathan and van Duijkeren, Niels and Zanelli, Andrea and Novoselnik, Branimir and Albin, Thivaharan and Quirynen, Rien and Diehl, Moritz},
year = {2022},
month = mar,
journal = {Mathematical Programming Computation},
volume = {14},
number = {1},
pages = {147--183},
issn = {1867-2957},
doi = {10.1007/s12532-021-00208-8},
url = {https://doi.org/10.1007/s12532-021-00208-8},
urldate = {2022-04-04},
abstract = {This paper presents the acados~software package, a collection of solvers for fast embedded optimization intended for fast embedded applications. Its interfaces to higher-level languages make it useful for quickly designing an optimization-based control algorithm by putting together different algorithmic components that can be readily connected and interchanged. Since the core of acados is written on top of a high-performance linear algebra library, we do not sacrifice computational performance. Thus, we aim to provide both flexibility and performance through modularity, without the need to rely on automatic code generation, which facilitates maintainability and extensibility. The main features of acados are: efficient optimal control algorithms targeting embedded devices implemented in C, linear algebra based on the high-performance BLASFEO Frison (ACM Transactions on Mathematical Software (TOMS) 44: 1--30, 2018) library, user-friendly interfaces to Matlab and Python, and compatibility with the modeling language of CasADi Andersson (Mathematical Programming Computation 11: 136, 2019). acados is free and open-source software released under the permissive BSD 2-Clause license.},
langid = {english}
}
@article{verschuerenModularSoftwarePackage2018,
title = {Towards a Modular Software Package for Embedded Optimization},
author = {Verschueren, Robin and Frison, Gianluca and Kouzoupis, Dimitris and {van Duijkeren}, Niels and Zanelli, Andrea and Quirynen, Rien and Diehl, Moritz},
year = {2018},
month = jan,
journal = {IFAC-PapersOnLine},
series = {6th {{IFAC Conference}} on {{Nonlinear Model Predictive Control NMPC}} 2018},
volume = {51},
number = {20},
pages = {374--380},
issn = {2405-8963},
doi = {10.1016/j.ifacol.2018.11.062},
url = {https://www.sciencedirect.com/science/article/pii/S2405896318327204},
urldate = {2021-04-08},
abstract = {In this paper we present acados, a new software package for model predictive control. It provides a collection of embedded optimization algorithms written in C, with a strong focus on computational efficiency. Its modular structure makes it useful for rapid prototyping, i.e. designing a control algorithm by putting together different algorithmic components that are readily connected and interchanged. The usefulness of the software is demonstrated with a closed-loop simulation experiment of an inverted pendulum, which shows acados attaining sub-millisecond computation times per iteration. Furthermore, we showcase a new algorithmic idea in the context of embedded nonlinear model predictive control (NMPC), namely sequential convex quadratic programming (SCQP), along with an efficient implementation of it.},
langid = {english}
}
@article{vonstrykDirectIndirectMethods1992,
title = {Direct and Indirect Methods for Trajectory Optimization},
author = {{von Stryk}, O. and Bulirsch, R.},
year = {1992},
month = dec,
journal = {Annals of Operations Research},
volume = {37},
number = {1},
pages = {357--373},
issn = {1572-9338},
doi = {10.1007/BF02071065},
url = {https://doi.org/10.1007/BF02071065},
urldate = {2019-04-11},
abstract = {This paper gives a brief list of commonly used direct and indirect efficient methods for the numerical solution of optimal control problems. To improve the low accuracy of the direct methods and to increase the convergence areas of the indirect methods we suggest a hybrid approach. For this a special direct collocation method is presented. In a hybrid approach this direct method can be used in combination with multiple shooting. Numerical examples illustrate the direct method and the hybrid approach.},
langid = {english}
}
@incollection{vonstrykNumericalSolutionOptimal1993,
title = {Numerical {{Solution}} of {{Optimal Control Problems}} by {{Direct Collocation}}},
booktitle = {Optimal {{Control}}: {{Calculus}} of {{Variations}}, {{Optimal Control Theory}} and {{Numerical Methods}}},
author = {{von Stryk}, Oskar},
editor = {Bulirsch, R. and Miele, A. and Stoer, J. and Well, K.},
year = {1993},
series = {{{ISNM International Series}} of {{Numerical Mathematics}}},
pages = {129--143},
publisher = {Birkh{\"a}user Basel},
address = {Basel},
doi = {10.1007/978-3-0348-7539-4_10},
url = {https://doi.org/10.1007/978-3-0348-7539-4_10},
urldate = {2019-04-11},
abstract = {By an appropriate discretization of control and state variables, a constrained optimal control problem is transformed into a finite dimensional nonlinear program which can be solved by standard SQP-methods [10]. Convergence properties of the discretization are derived. Prom a solution of this method known as direct collocation, these properties are used to obtain reliable estimates of adjoint variables. In the presence of active state constraints, these estimates can be significantly improved by including the switching structure of the state constraint into the optimization procedure. Two numerical examples are presented.},
isbn = {978-3-0348-7539-4},
langid = {english}
}