Matlab SVM

  • W8_980137
    Author
  • 616KB
    File size
  • zip
    File format
  • 0
    Favorites
  • VIP-only
    Resource type
  • 0
    Downloads
  • 2022-05-25 01:55
    Upload date
An SVM classifier for performing classification in MATLAB, packaged as LIBSVM 3.14.
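For orientation, here is a minimal sketch of how the bundled MATLAB interface is typically called. libsvmread, svmtrain and svmpredict ship in the archive listed below, as does the heart_scale sample file; the option string is an illustrative default, not a tuned setting.

  % Minimal LIBSVM call sequence in MATLAB (sketch; assumes the compiled MEX
  % files and heart_scale from the archive are on the MATLAB path)
  [labels, features] = libsvmread('heart_scale');                 % data in LIBSVM sparse format
  model = svmtrain(labels, features, '-s 0 -t 2 -c 1 -g 0.07');   % C-SVC with an RBF kernel
  [pred, acc, dec] = svmpredict(labels, features, model);         % predicts on the training data, just to show the call

Note that older MATLAB releases also ship a svmtrain function in the Statistics Toolbox, so the LIBSVM MEX files must come first on the path.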
libsvm-3.14.zip
  • libsvm-3.14
  • windows
  • libsvmread.mexw64
    11KB
  • svmtrain.mexw64
    62.5KB
  • svm-toy.exe
    138KB
  • svm-train.exe
    152KB
  • libsvmwrite.mexw64
    10KB
  • svm-scale.exe
    79KB
  • svmpredict.mexw64
    25KB
  • libsvm.dll
    156.5KB
  • svm-predict.exe
    123KB
  • matlab
  • svmtrain.c
    11.2KB
  • svm_model_matlab.h
    201B
  • svmpredict.c
    9.3KB
  • libsvmwrite.c
    2.1KB
  • svm_model_matlab.c
    8KB
  • Makefile
    1.5KB
  • libsvmread.c
    3.9KB
  • README
    9.4KB
  • make.m
    798B
  • svm.def
    477B
  • FAQ.html
    70.6KB
  • svm.cpp
    62.3KB
  • svm-scale.c
    7.7KB
  • tools
  • subset.py
    3.2KB
  • grid.py
    11.8KB
  • easy.py
    2.6KB
  • checkdata.py
    2.4KB
  • README
    4.6KB
  • COPYRIGHT
    1.5KB
  • Makefile.win
    1.1KB
  • svm-train.c
    8.8KB
  • Makefile
    732B
  • svm.h
    3.3KB
  • svm-predict.c
    5.4KB
  • heart_scale
    27KB
  • README
    27.6KB
  • python
  • svmutil.py
    8.3KB
  • svm.py
    8.9KB
  • Makefile
    32B
  • README
    11.6KB
  • java
  • svm_predict.java
    4.7KB
  • libsvm.jar
    50.4KB
  • svm_train.java
    8.2KB
  • svm_scale.java
    8.7KB
  • test_applet.html
    81B
  • Makefile
    624B
  • libsvm
  • svm.java
    61.4KB
  • svm_problem.java
    136B
  • svm_model.java
    868B
  • svm.m4
    60.7KB
  • svm_parameter.java
    1.3KB
  • svm_node.java
    115B
  • svm_print_interface.java
    87B
  • svm_toy.java
    12KB
  • svm-toy
  • windows
  • svm-toy.cpp
    11.2KB
  • qt
  • Makefile
    392B
  • svm-toy.cpp
    9.5KB
  • gtk
  • callbacks.cpp
    10.1KB
  • interface.h
    203B
  • Makefile
    573B
  • svm-toy.glade
    6.3KB
  • interface.c
    6.3KB
  • callbacks.h
    1.7KB
  • main.c
    398B
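The windows/ folder above contains precompiled 64-bit MEX binaries. On other platforms, or with a different MATLAB version, the interface can be rebuilt from the matlab/ folder using the included make.m. A sketch, assuming a C compiler has already been selected with mex -setup and that the archive was unpacked into the current directory (the path below is an assumption):

  % Rebuild the LIBSVM MATLAB interface from source
  cd('libsvm-3.14/matlab');   % folder with make.m, svmtrain.c, svmpredict.c, libsvmread.c, libsvmwrite.c
  make                        % compiles the four MEX files via mex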
Content preview
The excerpt below is the beginning of svm.java (from the java/libsvm folder of the archive); the preview is cut off mid-file by the hosting page.
package libsvm;
import java.io.*;
import java.util.*;

//
// Kernel Cache
//
// l is the number of total data items
// size is the cache size limit in bytes
//
class Cache {
    private final int l;
    private long size;
    private final class head_t
    {
        head_t prev, next;  // a circular list
        float[] data;
        int len;            // data[0,len) is cached in this entry
    }
    private final head_t[] head;
    private head_t lru_head;

    Cache(int l_, long size_)
    {
        l = l_;
        size = size_;
        head = new head_t[l];
        for(int i=0;i<l;i++) head[i] = new head_t();
        size /= 4;
        size -= l * (16/4);  // sizeof(head_t) == 16
        size = Math.max(size, 2* (long) l);  // cache must be large enough for two columns
        lru_head = new head_t();
        lru_head.next = lru_head.prev = lru_head;
    }

    private void lru_delete(head_t h)
    {
        // delete from current location
        h.prev.next = h.next;
        h.next.prev = h.prev;
    }

    private void lru_insert(head_t h)
    {
        // insert to last position
        h.next = lru_head;
        h.prev = lru_head.prev;
        h.prev.next = h;
        h.next.prev = h;
    }

    // request data [0,len)
    // return some position p where [p,len) need to be filled
    // (p >= len if nothing needs to be filled)
    // java: simulate pointer using single-element array
    int get_data(int index, float[][] data, int len)
    {
        head_t h = head[index];
        if(h.len > 0) lru_delete(h);
        int more = len - h.len;

        if(more > 0)
        {
            // free old space
            while(size < more)
            {
                head_t old = lru_head.next;
                lru_delete(old);
                size += old.len;
                old.data = null;
                old.len = 0;
            }

            // allocate new space
            float[] new_data = new float[len];
            if(h.data != null) System.arraycopy(h.data,0,new_data,0,h.len);
            h.data = new_data;
            size -= more;
            do {int _=h.len; h.len=len; len=_;} while(false);
        }

        lru_insert(h);
        data[0] = h.data;
        return len;
    }

    void swap_index(int i, int j)
    {
        if(i==j) return;

        if(head[i].len > 0) lru_delete(head[i]);
        if(head[j].len > 0) lru_delete(head[j]);
        do {float[] _=head[i].data; head[i].data=head[j].data; head[j].data=_;} while(false);
        do {int _=head[i].len; head[i].len=head[j].len; head[j].len=_;} while(false);
        if(head[i].len > 0) lru_insert(head[i]);
        if(head[j].len > 0) lru_insert(head[j]);

        if(i>j) do {int _=i; i=j; j=_;} while(false);
        for(head_t h = lru_head.next; h!=lru_head; h=h.next)
        {
            if(h.len > i)
            {
                if(h.len > j)
                    do {float _=h.data[i]; h.data[i]=h.data[j]; h.data[j]=_;} while(false);
                else
                {
                    // give up
                    lru_delete(h);
                    size += h.len;
                    h.data = null;
                    h.len = 0;
                }
            }
        }
    }
}

//
// Kernel evaluation
//
// the static method k_function is for doing single kernel evaluation
// the constructor of Kernel prepares to calculate the l*l kernel matrix
// the member function get_Q is for getting one column from the Q Matrix
//
abstract class QMatrix {
    abstract float[] get_Q(int column, int len);
    abstract double[] get_QD();
    abstract void swap_index(int i, int j);
};

abstract class Kernel extends QMatrix {
    private svm_node[][] x;
    private final double[] x_square;

    // svm_parameter
    private final int kernel_type;
    private final int degree;
    private final double gamma;
    private final double coef0;

    abstract float[] get_Q(int column, int len);
    abstract double[] get_QD();

    void swap_index(int i, int j)
    {
        do {svm_node[] _=x[i]; x[i]=x[j]; x[j]=_;} while(false);
        if(x_square != null) do {double _=x_square[i]; x_square[i]=x_square[j]; x_square[j]=_;} while(false);
    }

    private static double powi(double base, int times)
    {
        double tmp = base, ret = 1.0;

        for(int t=times; t>0; t/=2)
        {
            if(t%2==1) ret*=tmp;
            tmp = tmp * tmp;
        }
        return ret;
    }

    double kernel_function(int i, int j)
    {
        switch(kernel_type)
        {
            case svm_parameter.LINEAR:
                return dot(x[i],x[j]);
            case svm_parameter.POLY:
                return powi(gamma*dot(x[i],x[j])+coef0,degree);
            case svm_parameter.RBF:
                return Math.exp(-gamma*(x_square[i]+x_square[j]-2*dot(x[i],x[j])));
            case svm_parameter.SIGMOID:
                return Math.tanh(gamma*dot(x[i],x[j])+coef0);
            case svm_parameter.PRECOMPUTED:
                return x[i][(int)(x[j][0].value)].value;
            default:
                return 0;  // java
        }
    }

    Kernel(int l, svm_node[][] x_, svm_parameter param)
    {
        this.kernel_type = param.kernel_type;
        this.degree = param.degree;
        this.gamma = param.gamma;
        this.coef0 = param.coef0;

        x = (svm_node[][])x_.clone();

        if(kernel_type == svm_parameter.RBF)
        {
            x_square = new double[l];
            for(int i=0;i<l;i++)
                x_square[i] = dot(x[i],x[i]);
        }
        else x_square = null;
    }

    static double dot(svm_node[] x, svm_node[] y)
    {
        double sum = 0;
        int xlen = x.length;
        int ylen = y.length;
        int i = 0;
        int j = 0;
        while(i < xlen && j < ylen)
        {
            if(x[i].index == y[j].index)
                sum += x[i++].value * y[j++].value;
            else
            {
                if(x[i].index > y[j].index)
                    ++j;
                else
                    ++i;
            }
        }
        return sum;
    }

    static double k_function(svm_node[] x, svm_node[] y, svm_parameter param)
    {
        switch(param.kernel_type)
        {
            case svm_parameter.LINEAR:
                return dot(x,y);
            case svm_parameter.POLY:
                return powi(param.gamma*dot(x,y)+param.coef0,param.degree);
            case svm_parameter.RBF:
            {
                double sum = 0;
                int xlen = x.length;
                int ylen = y.length;
                int i = 0;
                int j = 0;
                while(i < xlen && j < ylen)
                {
                    if(x[i].index == y[j].index)
                    {
                        double d = x[i++].value - y[j++].value;
                        sum += d*d;
                    }
                    else if(x[i].index > y[j].index)
                    {
                        sum += y[j].value * y[j].value;
                        ++j;
                    }
                    else
                    {
                        sum += x[i].value * x[i].value;
                        ++i;
                    }
                }

                while(i < xlen)
                {
                    sum += x[i].value * x[i].value;
                    ++i;
                }

                while(j < ylen)
                {
                    sum += y[j].value * y[j].value;
                    ++j;
                }

                return Math.exp(-param.gamma*sum);
            }
            case svm_parameter.SIGMOID:
                return Math.tanh(param.gamma*dot(x,y)+param.coef0);
            case svm_parameter.PRECOMPUTED:
                return x[(int)(y[0].value)].value;
            default:
                return 0;  // java
        }
    }
}

// An SMO algorithm in Fan et al., JMLR 6(2005), p. 1889--1918
// Solves:
//
//    min 0.5(\alpha^T Q \alpha) + p^T \alpha
//
//        y^T \alpha = \delta
//        y_i = +1 or -1
//        0 <= alpha_i <= Cp for y_i = 1
//        0 <= alpha_i <= Cn for y_i = -1
//
// Given:
//
//    Q, p, y, Cp, Cn, and an initial feasible point \alpha
//    l is the size of vectors and matrices
//    eps is the stopping tolerance
//
// solution will be put in \alpha, objective value will be put in obj
//
class Solver {
    int active_size;
    byte[] y;
    double[] G;        // gradient of objective function
    static final byte LOWER_BOUND = 0;
    static final byte UPPER_BOUND = 1;
    static final byte FREE = 2;
    byte[] alpha_status;    // LOWER_BOUND, UPPER_BOUND, FREE
    double[] alpha;
    QMatrix Q;
    double[] QD;
    double eps;
    double Cp,Cn;
    double[] p;
    int[] active_set;
    double[] G_bar;    // gradient, if we treat free variables as 0
    int l;
    boolean unshrink;  // XXX

    static final double INF = java.lang.Double.POSITIVE_INFINITY;

    double get_C(int i)
    {
        return (y[i] > 0)? Cp : Cn;
    }
    void update_alpha_status(int i)
    {
        if(alpha[i] >= get_C(i))
            alpha_status[i] = UPPER_BOUND;
        else if(alpha[i] <= 0)
            alpha_status[i] = LOWER_BOUND;
        else alpha_status[i] = FREE;
    }
    boolean is_upper_bound(int i) { return alpha_status[i] == UPPER_BOUND; }
    boolean is_lower_bound(int i) { return alpha_status[i] == LOWER_BOUND; }
    boolean is_free(int i) { return alpha_status[i] == FREE; }

    // java: information about solution except alpha,
    // because we cannot return multiple values otherwise...
    static class SolutionInfo {
        double obj;
        double rho;
        double upper_bound_p;
        double upper_bound_n;
        double r;  // for Solver_NU
    }

    void swap_index(int i, int j)
    {
        Q.swap_index(i,j);
        do {byte _=y[i]; y[i]=y[j]; y[j]=_;} while(false);
        do {double _=G[i]; G[i]=G[j]; G[j]=_;} while(false);
        do {byte _=alpha_status[i]; alpha_status[i]=alpha_status[j]; alpha_status[j]=_;} while(false);
        do {double _=alpha[i]; alpha
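For reference, the problem stated in the Solver comment block above, together with the kernels implemented by kernel_function, in standard notation:

\min_{\alpha}\; \tfrac{1}{2}\,\alpha^{T} Q \alpha + p^{T}\alpha
\quad\text{s.t.}\quad y^{T}\alpha = \delta,\;\;
0 \le \alpha_i \le C_p \text{ if } y_i = +1,\;\;
0 \le \alpha_i \le C_n \text{ if } y_i = -1,

K(x_i,x_j) =
\begin{cases}
x_i^{T}x_j & \text{linear}\\
(\gamma\, x_i^{T}x_j + \mathrm{coef0})^{\mathrm{degree}} & \text{polynomial}\\
\exp\!\big(-\gamma \lVert x_i - x_j \rVert^{2}\big) & \text{RBF}\\
\tanh(\gamma\, x_i^{T}x_j + \mathrm{coef0}) & \text{sigmoid}
\end{cases}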
    Related downloads
    • Neural network classification MATLAB program
      A neural network classification program written in MATLAB; convenient and practical.
    • Classification MATLAB programs
      More than a dozen MATLAB programs, including a complete spectral-clustering example with detailed demos, flower classification, kernel functions, and more; also useful as a reference for graduate thesis work.
    • Pattern classification MATLAB implementation
      A MATLAB implementation of the material in Duda's book Pattern Classification.
    • SVM text classification MATLAB source code
      MATLAB source code for SVM text classification, provided as m-files.
    • MATLAB code for MNIST handwritten-digit classification with a multi-layer ELM
      Run the .m program directly; if an out-of-memory error appears, reduce the number of hidden nodes.
    • Fisher classification MATLAB algorithm.rar
      A Fisher classification MATLAB algorithm for handwritten digit recognition. Includes the program and training samples for 8 digits; for the digit feature-extraction part, see project report.ppt after unpacking.
    • High-dimensional multi-label classification in MATLAB
      KNN, SVM, random forest, and other algorithms applied to 784-dimensional data divided into 10 classes.
    • SVM multi-class classification MATLAB programs
      SVM multi-class classification programs for MATLAB, including one-vs-one, one-vs-rest, and other binary-tree classification algorithms.
    • Naive Bayes classification MATLAB implementation
      Bayesian classification is a statistical approach: a family of algorithms that assign class labels using probability theory. In many settings the Naive Bayes (NB) classifier is competitive with decision trees and neural networks; it scales to large databases, the method is simple, and its classification...