ccl_learna_lambda

PURPOSE

[optimal] = ccl_learna_lambda (Un, X, J, options)

SYNOPSIS

function [optimal] = ccl_learna_lambda (Un, X, J, options)

DESCRIPTION

 [optimal] = ccl_learna_lambda (Un, X, J, options)

 Learns the state-dependent selection matrix (Lambda) for problems of the form
 Un = N(q) * F(q), where N(q) = I - pinv(A(q))*A(q) is a state-dependent projection matrix,
                         A(q) = Lambda * J(q) is the constraint matrix, and
                         F(q) is some policy.
 Input:

   X                                 States of the system (one column per sample)
   Un                                Controls of the system, generated in the form Un(q) = N(q) * F(q),
                                     where N(q) = I - pinv(A(q))*A(q) is the projection matrix that projects
                                     F(q) onto the null space of A(q). N(q) may be state dependent, but
                                     it should be generated in a consistent way.
   J                                 Function handle that returns the task Jacobian J(q) at a given state q
   options                           Learning options (options.dim_r: task-space dimensionality;
                                     options.dim_b: number of basis functions)
 Output:

   optimal                           A model for the projection matrix
   optimal.f_proj(q)                 A function that predicts N(q) given q
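
 Example: a minimal usage sketch (with hypothetical data; assumes the CCL
 library is on the MATLAB path, and my_jacobian is a placeholder for a
 user-supplied function returning the task Jacobian at a state):

   options.dim_r = 2 ;                       % task-space dimensionality
   options.dim_b = 25 ;                      % number of basis functions
   J             = @(q) my_jacobian(q) ;     % function handle: q -> J(q)
   optimal       = ccl_learna_lambda (Un, X, J, options) ;
   N1            = optimal.f_proj (X(:,1)) ; % predicted projection matrix N(q) at the first state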

CROSS-REFERENCE INFORMATION

This function calls:
This function is called by:

SOURCE CODE

function [optimal] = ccl_learna_lambda (Un, X, J, options)
% [optimal] = ccl_learna_lambda (Un, X, J, options)
%
% Learns the state-dependent selection matrix (Lambda) for problems of the form
% Un = N(q) * F(q), where N(q) = I - pinv(A(q))*A(q) is a state-dependent projection matrix,
%                         A(q) = Lambda * J(q) is the constraint matrix, and
%                         F(q) is some policy.
% Input:
%
%   X                                 States of the system (one column per sample)
%   Un                                Controls of the system, generated in the form Un(q) = N(q) * F(q),
%                                     where N(q) = I - pinv(A(q))*A(q) is the projection matrix that projects
%                                     F(q) onto the null space of A(q). N(q) may be state dependent, but
%                                     it should be generated in a consistent way.
%   J                                 Function handle that returns the task Jacobian J(q) at a given state q
%   options                           Learning options (options.dim_r: task-space dimensionality;
%                                     options.dim_b: number of basis functions)
% Output:
%
%   optimal                           A model for the projection matrix
%   optimal.f_proj(q)                 A function that predicts N(q) given q

% CCL: A MATLAB library for Constraint Consistent Learning
% Copyright (C) 2007  Matthew Howard
% Contact: matthew.j.howard@kcl.ac.uk
%
% This library is free software; you can redistribute it and/or
% modify it under the terms of the GNU Lesser General Public
% License as published by the Free Software Foundation; either
% version 2.1 of the License, or (at your option) any later version.
%
% This library is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
% Lesser General Public License for more details.
%
% You should have received a copy of the GNU Library General Public
% License along with this library; if not, write to the Free
% Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

% essential parameters
model.dim_r     = options.dim_r ;   % dimensionality of the task (end-effector) space
model.dim_x     = size(X, 1) ;      % dimensionality of the input X
model.dim_u     = size(Un,1) ;      % dimensionality of the output Un, where Un = N(X) * F(X)
model.dim_t     = model.dim_r - 1 ; % number of parameters (angles) per constraint vector
model.dim_n     = size(X,2) ;       % number of training points
model.dim_b     = options.dim_b ;   % number of Gaussian kernel basis functions
optimal.nmse    = 10000000 ;        % initialise with a large NMSE so the first candidate model is accepted
model.var       = sum(var(Un,0,2)) ;% variance of Un

% The constraint matrix consists of K mutually orthogonal constraint vectors.
% At the k^(th) iteration, candidate constraint vectors are rotated into the
% space orthogonal to all previously found ( i < k ) constraint vectors. At
% the first iteration, Rn is the identity matrix.

% Compute the task-space image Vn = J(q) * Un of each control. Samples whose
% image is numerically zero carry no information about the constraint, so
% they are discarded.
Vn     = zeros(model.dim_r, model.dim_n) ;
norm_v = zeros(1, model.dim_n) ;
for n = 1 : model.dim_n
    Vn(:,n)   = J(X(:,n)) * Un(:,n) ;
    norm_v(n) = norm(Vn(:,n)) ;
end
id_keep = find(norm_v > 1e-3) ;
Vn = Vn(:,id_keep) ;
X  = X(:,id_keep) ;
Un = Un(:,id_keep) ;

model.dim_n     = size(X,2) ;       % number of training points after pruning
% Choose a method for generating the centres of the Gaussian kernels. A grid
% of centres is usually adequate for a 2D problem; for higher dimensionality,
% k-means centres normally perform better.
if model.dim_x < 3
    model.dim_b = floor(sqrt(model.dim_b))^2 ;
    centres     = ccl_math_gc (X, model.dim_b) ;        % generate centres on a grid
else
    centres     = ccl_math_kc (X, model.dim_b) ;        % generate centres with k-means
end
variance        = mean(mean(sqrt(ccl_math_distances(centres, centres))))^2 ;  % kernel variance: squared mean distance between centres
model.phi       = @(x) ccl_basis_rbf ( x, centres, variance ) ;     % Gaussian kernel basis functions
BX              = model.phi(X) ;                                    % basis activations for all training points

Rn = cell(1,model.dim_n) ;
for n = 1 : model.dim_n
    Rn{n} = eye(model.dim_r) ;
end
RnVn  = Vn ;
% The objective function is E(Xn) = Lambda * Rn * Vn.
% For faster computation, RnVn = Rn*Vn is pre-calculated to avoid repeated
% calculation during the non-linear optimisation. At the first iteration the
% rotation matrix is the identity, so RnVn = Vn.

for alpha_id = 1:model.dim_r
    model.dim_k = alpha_id ;
    model       = ccl_learna_sa (BX, RnVn, model ) ;    % search for the optimal k^(th) constraint vector
    % Predict the constraint parameters (angles). The first alpha_id-1 angles
    % are fixed to pi/2 so that the candidate vector lies in the space
    % orthogonal to the previously found constraint vectors.
    theta       = [pi/2*ones(model.dim_n, (alpha_id-1)), (model.w{alpha_id}* BX)' ] ;
    for n = 1: model.dim_n
        Rn{n}       = ccl_math_rotmat (theta(n,:), Rn{n}, model, alpha_id) ;  % update rotation matrix for the next iteration
        RnVn(:,n)   = Rn{n} * Vn(:,n) ;                                       % rotate Vn ahead of time for faster computation
    end
    % If the k^(th) constraint vector reduces the fitness, the previously
    % found vectors are enough to describe the constraint.
    if (model.nmse > optimal.nmse) && (model.nmse > 1e-3)
        break ;
    else
        optimal = model ;
    end
end
optimal.f_proj  = @(q) ccl_learna_pred_proj_lambda (q, optimal, J, eye(model.dim_r)) ;
fprintf('\t Found %d constraint vectors with residual error = %4.2e\n', optimal.dim_k, optimal.nmse) ;
end
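
The pi/2 prefix used when building theta above exploits the angular
parameterisation of unit vectors: each constraint vector in R^dim_r is
described by dim_r - 1 angles (hence model.dim_t = model.dim_r - 1). The
sketch below shows one standard spherical-coordinate construction that is
consistent with this trick; whether ccl_math_rotmat uses exactly this
convention is an assumption, not something confirmed by this file:

   function a = unit_vector_from_angles (theta)
   % Map d-1 angles to a unit vector in R^d (hypothetical helper, not part
   % of the library). Setting the first k-1 angles to pi/2 zeroes the first
   % k-1 components, making the vector orthogonal to those directions.
   d = numel(theta) + 1 ;
   a = ones(d,1) ;
   for i = 1:d-1
       a(i)     = a(i) * cos(theta(i)) ;      % component i gets cos(theta_i)
       a(i+1:d) = a(i+1:d) * sin(theta(i)) ;  % later components accumulate sin(theta_i)
   end
   end

For example, unit_vector_from_angles([pi/2, pi/4]) returns (up to floating
point) [0; cos(pi/4); sin(pi/4)], a unit vector orthogonal to the first
coordinate axis.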
