SVM (Support Vector Machine) regression in OpenCV C++
Could someone explain how to use SVM regression in OpenCV C++? What should the regression return — a position in the image (x, y coordinates), or a single number?
I want to use it for video analysis. I have three points with known trajectories along the whole video. I want to use the trajectories of two of them to infer the trajectory of the third one, training the SVM on these trajectories.
I am thinking of organizing the training data and labels like this:
- training data: the position (x, y) in each frame of two of the points
- labels: the position (x, y) of the third point in each frame
Is this the proper way? Or should I organize it like this instead:
- training data: the position (x, y) in each frame of all three points, plus negative examples created by changing the position of the third point, to simulate a false training set
- labels: 1 if the third point's position belongs to the true trajectory, -1 if it belongs to the false training set
An SVM can be used for regression: see here http://alex.smola.org/papers/2003/smosch03b.pdf. However, your problem may be more suited to a multivariate RVM (Relevance Vector Machine); see here (with C++ source): http://mi.eng.cam.ac.uk/~at315/mvrvm.htm
Here is an OpenCV edition of the code:
#include<iostream> #include<fstream> #include<cstdlib> #include<windows.h> #include<string> #include<vector> #include<opencv2/opencv.hpp> using namespace std; using namespace cv; #define cout_var(x) cout << #x"=" << x << endl; #define show_img(x) namedwindow(#x);imshow(#x,x);waitkey(20); //calculates conolution of 2 ploynoimals // n - size of polynomial x // m - size of polynomial y // z - resultant polynomial after convolution void conv(int n,int m,double* x,double* y,double* z) { int i,j; for(i=0; i<(n+m-1); i++) { z[i]=0.0; for(j=0; j<n; j++) { if(((i-j)>=0)&& ((i-j)<m)) { z[i]+=x[j]*y[i-j]; } } } } //calculates poly noimal coeefeicents number of roots //input //n - number of roots //x - array of roots //ouput //y - array of polynomial coefficnets void poly(int n,double* x,double* y) { int i,j; for(j=0; j<=n; j++) { y[j]=0.0; } y[0]=1; double temp[20]; for(j=0; j<n; j++) { for(i=0; i<=(j); i++) { temp[i+1]=y[i+1]-x[j]*y[i]; } for(i=0; i<=(j); i++) { y[i+1]=temp[i+1]; } } } //find roots of polynomial int roots(int n,double* cof,double*roots) { bool fnzf=false; int k=0,i; double acof[100]; for(i=0; i<n; i++) { if(!fnzf) { if(fabs(cof[i])>10e-6) { fnzf=true; acof[k]=cof[i]; k++; } } else { acof[k]=cof[i]; k++; } } if(k==0) { return 0; } mat a(k,1,cv_64fc1); for(i=0; i<k; i++) { a.at<double>(i)=acof[i]; } mat r; cv::solvepoly(a,r); mat ch[2]; cv::split(r,ch); mat b=ch[0].clone(); for(i=0; i<b.rows; i++) { roots[i]=b.at<double>(i); } return b.rows; } //this function sloves polynomial equation of single hyperparameter void solveforalpha(int n,mat& q, mat& s,mat& q,mat s,double old_alpha,double& newalpha,double& l_inc) { double alpha_max=1e12; double c[500], p[500], pp[500], ss[500], cc[500]; int i,k,j; for(i=0; i<n; i++) { c[i]=0.0; } for(i=0; i<(2*n-1); i++) { cc[i]=0.0; } for(i=0; i<n; i++) { k=0; for(j=0; j<n; j++) { if(i==j) { continue; } ss[k]=-s.at<double>(j); k++; } poly(n-1,ss,p); conv(n,n,p,p,pp); for(j=0; j<n; j++) { c[j]=c[j]+p[j]; } for(j=0; j<(2*n-1); 
j++) { cc[j]=cc[j]+ q.at<double>(i)*q.at<double>(i)*pp[j]; } } for(i=0; i<n; i++) { ss[i]=-s.at<double>(i); } poly(n,ss,p); conv(n+1,n+1,p,p,pp); for(i=0; i<(2*n+1); i++) { pp[i]=n*pp[i]; } double cc0[500]; conv(n+1,n,p,c,cc0); double ccc[500]; ccc[0]=cc0[0]; for(i=1; i<(2*n); i++) { ccc[i]=cc[i-1]+cc0[i]; } ccc[2*n]=0; bool fnonzero=false; double aa[1000]; k=0; for(i=0; i<(2*n+1); i++) { aa[i]=pp[i]-ccc[i]; } double roots[100]; int nroots=roots(2*n+1,aa,roots); roots[nroots]=alpha_max; nroots++; double l[100]; double alphas[100]; for(i=0; i<100; i++) { l[i]=0.0; } k=0; for( i=0; i<nroots; i++) { if(roots[i]<=0) { continue; } double new_alpha=roots[i]; if((old_alpha>=(alpha_max-1))&& (new_alpha<alpha_max)) { alphas[k]=new_alpha; for(j=0; j<n; j++) { l[k]=l[k]+ log(new_alpha/(new_alpha+s.at<double>(j)))+((q.at<double>(j)*q.at<double>(j)/(new_alpha+s.at<double>(j)))); } } else if((old_alpha<alpha_max)&& (new_alpha>=(alpha_max-1))) { alphas[k]=new_alpha; for(j=0; j<n; j++) { l[k]=l[k]- log(old_alpha/(old_alpha+s.at<double>(j)))-((q.at<double>(j)*q.at<double>(j)/(old_alpha+s.at<double>(j)))); } } else if((old_alpha<alpha_max)&&(new_alpha<alpha_max)) { alphas[k]=new_alpha; double ad=(1/new_alpha)-(1/old_alpha); (j=0; j<n; j++) { l[k]=l[k]+ log(new_alpha/(new_alpha+s.at<double>(j)))+((q.at<double>(j)*q.at<double>(j)/(new_alpha+s.at<double>(j)))) -log(old_alpha/(old_alpha+s.at<double>(j)))-((q.at<double>(j)*q.at<double>(j)/(old_alpha+s.at<double>(j)))); } } else { alphas[k]=new_alpha; l[k]=0.0; } k++; } l_inc=-2000000.0; for(i=0; i<k; i++) if(l[i]>l_inc) { l_inc=l[i]; newalpha=alphas[i]; } } //------------------------------------------------------------------------------- mat distsqrd(mat& x,mat& y) { mat d2; int nx = x.rows; int ny = y.rows; mat xsq,ysq; pow(x,2,xsq); pow(y,2,ysq); mat sumsqx; cv::reduce(xsq,sumsqx,1,cv_reduce_sum); mat sumsqy; cv::reduce(ysq,sumsqy,1,cv_reduce_sum); d2 = sumsqx*mat::ones(1,ny,cv_64fc1) + mat::ones(nx,1,cv_64fc1)*sumsqy.t() - 
2*(x*(y.t())); return d2; } mat kernelfunction(mat& x1,mat& x2,double lenscal) { mat k; int n1=x1.rows; int n2=x2.rows; int d=x1.cols; double eta = 1.0/pow(lenscal,2); exp(-eta*distsqrd(x1,x2),k); mat k1=mat(k.rows,k.cols+1,cv_64fc1); k.copyto(k1( range(0,k.rows),range(1,k1.cols))); k1.col(0)=1; return k1; } //output // weights trained weights // rv_idxs index column of phi of relevant basis //input // phi basis function/design matrix // t targetmatrix // n number of training examples // m number of basis functions //[ n m] =size(phi) // p number of target dimensions // [ n p]= size(t) // beta initial beta values target dimensions // beta set each target dimension p, beta_p=(1/(var(t_p)) void trainrvm( mat &x,mat &t,double kernelwidth,int max_it, mat& beta,mat &weights,mat& rv_idxs,mat& rv_x) { int n=x.rows; int m=n+1; int p=t.cols; int i,j,p,k,mm; const double inf=1e36; beta=mat (p,1,cv_64fc1); scalar mean,stddev; // начальное приближение точности for(int i=0;i<p;i++) { cv::meanstddev(t.col(i),mean,stddev); beta.at<double>(i)=1.0/(stddev[0]*stddev[0]+10.0*dbl_epsilon); } mat nonzero=mat::zeros(m,1,cv_8uc1); mat phi = kernelfunction(x,x,kernelwidth); mat phit; phit=phi.t()*t; const double alpha_max=1e12; mat alpha(m,1,cv_64fc1); alpha=alpha_max; mat gamma=mat::ones(m,1,cv_64fc1); bool run_cond; mat phi2=phi.t()*phi; mat s(p,1,cv_64fc1); mat s(p,1,cv_64fc1); mat q(p,1,cv_64fc1); mat q(p,1,cv_64fc1); mat l_inc(m,1,cv_64fc1); mat newalpha(m,1,cv_64fc1); double max_alpha, max_linc=-inf; int max_index=-1; int maxidx[2]; cv::minmaxidx(l_inc,0,0,0,maxidx); max_index=maxidx[0]; max_alpha=newalpha.at<double>(max_index); alpha.at<double>(max_index)=max_alpha; //start iterations run_cond=true; i=1; while(run_cond) { mm=0; for(j=0; j<m; j++) { if(alpha.at<double>(j)<alpha_max) { nonzero.at<unsigned char>(j)=true; mm++; } else { nonzero.at<unsigned char>(j)=false; } } mat phi_nz=mat::zeros(n,mm,cv_64fc1); mat alpha_nz(mm,1,cv_64fc1); k=0; for(j=0; j<m; j++) { 
if(nonzero.at<unsigned char>(j)) { alpha_nz.at<double>(k)=alpha.at<double>(j); phi.col(j).copyto(phi_nz.col(k)); k++; } } vector<mat> sigma_nz; mat hh=(phi_nz.t()*phi_nz); for(p=0; p<p; p++) { mat h=beta.at<double>(p)*hh; h.diag()+=alpha_nz; sigma_nz.push_back(h.inv(cv::decomp_svd)); } mat dp_tmp; mat phi; for(k=0; k<m; k++) { phi=phi.col(k); for(j=0; j<p; j++) { dp_tmp=beta.at<double>(j)*phi.t()*phi-beta.at<double>(j)*beta.at<double>(j)*phi.t()*phi_nz*sigma_nz[j]*phi_nz.t()*phi; s.at<double>(j)=dp_tmp.at<double>(0); dp_tmp=beta.at<double>(j)*phi.t()*t.col(j)-beta.at<double>(j)*beta.at<double>(j)*phi.t()*phi_nz*sigma_nz[j]*phi_nz.t()*t.col(j); q.at<double>(j)=dp_tmp.at<double>(0); } if(alpha.at<double>(k)<alpha_max) { s=alpha.at<double>(k)*s/(alpha.at<double>(k)-s); q=alpha.at<double>(k)*q/(alpha.at<double>(k)-s); } else { s=s.clone(); q=q.clone(); } solveforalpha(p,q,s,q,s,alpha.at<double>(k),newalpha.at<double>(k),l_inc.at<double>(k)); } cv::minmaxidx(l_inc,0,&max_linc,0,maxidx); max_index=maxidx[0]; max_alpha=newalpha.at<double>(max_index); alpha.at<double>(max_index)=max_alpha; weights=mat(mm,p,cv_64fc1); for(j=0; j<p; j++) { mat tmp=beta.at<double>(j)*sigma_nz[j]*phi_nz.t()*t.col(j); tmp.copyto(weights.col(j)); double gamma_sum=0.0; gamma_sum=sum(1.0-alpha_nz.t()*sigma_nz[j].diag())[0]; mat error=t.col(j)-phi_nz*(weights.col(j)); pow(error,2,error); double ed=0.0; ed=sum(error)[0]; beta.at<double>(j)=(((double)n)- gamma_sum)/ed; } i++; // Критерий выхода по количеству итераций или по максимальному // изменению не превышающему заданный порог. 
if(i>max_it || max_linc<(100.0*dbl_epsilon)) { run_cond=false; } } mat nonzero_m=mat(nonzero); // Количество релевантных векторов int nrvs=cv::countnonzero(nonzero_m); // Индексы векторов rv_idxs=mat(nrvs,1,cv_32sc1); // Координаты векторов (вторая координата по каждому измерению вычисляется) rv_x=mat(nrvs-1,1,cv_64fc1); int ind=0; for(int i=0;i<nonzero.rows;i++) { if(nonzero.at<unsigned char>(i)) { rv_idxs.at<int>(ind)=i; // первый индекс соответствует добавленной единице смещения // поэтому начинаем со второго. if(i>0) { rv_x.at<double>(ind-1)=x.at<double>(i); } ind++; } } } // ------------------------------------------------------------------- // Загрузка матриц. Для взаимодействия с матлабом во время отладки. // ------------------------------------------------------------------- void readmat(ifstream& ifs,mat& m) { int rows; int cols; ifs >> rows; char comma; ifs >> comma; ifs >> cols; m=mat(rows,cols,cv_64fc1); for(int i=0;i<rows;i++) { for(int j=0;j<cols;j++) { ifs >> m.at<double>(i,j); if(j!=cols-1){ifs >> comma;} } } } // ------------------------------------------------------------------- // matlab matrix loading (for debug purpose) // ------------------------------------------------------------------- mat loadmatrix(string filename) { mat m; ifstream ifs(filename); if (ifs.is_open()) { readmat(ifs,m); ifs.close(); } return m; } void predictrvm(mat& x,mat& rv_x,mat& weights,double kernelwidth,mat& y_rvm) { mat phi=kernelfunction(x,rv_x,kernelwidth); y_rvm=phi*weights; } int main() { int gh=600; int gw=600; mat graph=mat::zeros(gh,gw,cv_8uc3); namedwindow("graph"); mat t=loadmatrix("t.txt"); mat x=loadmatrix("x.txt"); for(int i=0;i<x.rows;i++) { circle(graph,point(i*6,gw-(t.at<double>(i,0)+1)*160),2,scalar(0,255,255),-1,cv_aa); circle(graph,point(i*6,gw-(t.at<double>(i,1)+1)*160),2,scalar(0,255,255),-1,cv_aa); } imshow("graph",graph); waitkey(10); cout.precision(15); mat weights; mat rv_idxs; mat beta; mat rv_x; double kernelwidth=4; 
trainrvm(x,t,kernelwidth,50, beta,weights,rv_idxs,rv_x); cout_var(weights); mat y_rvm; predictrvm(x,rv_x,weights,kernelwidth,y_rvm); filestorage fs("test.yml", filestorage::write); fs << "weights" << weights; fs << "rv_x" << rv_x; fs.release(); for(int i=1;i<x.rows;i++) { point2d p11(i*6,gw-(y_rvm.at<double>(i,0)+1)*160); point2d p21(i*6,gw-(y_rvm.at<double>(i,1)+1)*160); i--; point2d p12(i*6,gw-(y_rvm.at<double>(i,0)+1)*160); point2d p22(i*6,gw-(y_rvm.at<double>(i,1)+1)*160); i++; line(graph,p11,p12,scalar(255,255,0),1,cv_aa); line(graph,p21,p22,scalar(255,255,0),1,cv_aa); } imshow("graph",graph); waitkey(); return 0; }
Comments
Post a Comment