% PEP example: worst-case analysis of an inexact fast (accelerated) gradient
% method on a smooth (strongly) convex function, via the performance
% estimation problem (PEP) framework.
%
% Structure: (0) empty PEP, (1) objective class, (2) initial condition,
% (3) algorithm, (4) performance measure, (5) solve, (6) read result.

% (0) Initialize an empty PEP
P = pep();

% (1) Set up the objective function
param.mu = 0; % strong convexity parameter (mu = 0: merely convex)
param.L  = 1; % smoothness parameter (Lipschitz constant of the gradient)

F = P.DeclareFunction('SmoothStronglyConvex', param); % F is the objective function

% (2) Set up the starting point and initial condition
x0 = P.StartingPoint();             % x0 is some starting point
[xs, fs] = F.OptimalPoint();        % xs is an optimal point, and fs = F(xs)
P.InitialCondition((x0-xs)^2 <= 1); % add the initial condition ||x0-xs||^2 <= 1

% (3) Algorithm: fast gradient method with inexact (sub)gradient oracle
N = 7; % number of iterations

x = cell(N+1, 1); % store the iterates in a cell array for convenience
x{1} = x0;
y = x0;
epsilon = .1; % relative inexactness of the (sub)gradient oracle
              % (named 'epsilon' to avoid shadowing MATLAB's built-in eps)
for i = 1:N
    % d is an epsilon-inexact (sub)gradient of F at y
    d = inexactsubgradient(y, F, epsilon);
    % gradient step with the standard 1/L step size
    x{i+1} = y - 1/param.L * d;
    % Nesterov-style momentum/extrapolation step
    y = x{i+1} + (i-1)/(i+2) * (x{i+1} - x{i});
end

% (4) Set up the performance measure
[g, f] = F.oracle(x{N+1});   % g = grad F(x), f = F(x) at the last iterate
P.PerformanceMetric(f - fs); % worst case evaluated as F(x) - F(xs)

% (5) Solve the PEP
P.solve()

% (6) Evaluate the output
double(f - fs) % worst-case objective function accuracy

% Result should be worse than 2/(N^2+5*N+6) (for exact fast gradient)
% see Taylor, Adrien B., Julien M. Hendrickx, and François Glineur.