- #include <math.h>
- #include <stdio.h>
- #include <stdlib.h>
- #include <ctype.h>
- #include <float.h>
- #include <string.h>
- #include <stdarg.h>
- #include <limits.h>
- #include <locale.h>
- #include "svm.h"
- #ifdef _OPENMP
- #include <omp.h>
- #endif
- int libsvm_version = LIBSVM_VERSION;
- typedef float Qfloat;
- typedef signed char schar;
- #ifndef min
- template<class T>
- static inline T min(T x, T y) { return (x < y) ? x : y; }
- #endif
- #ifndef max
- template<class T>
- static inline T max(T x, T y) { return (x > y) ? x : y; }
- #endif
- template<class T>
- static inline void swap(T &x, T &y) {
- T t = x;
- x = y;
- y = t;
- }
- template<class S, class T>
- static inline void clone(T *&dst, S *src, int n) {
- dst = new T[n];
- memcpy((void *) dst, (void *) src, sizeof(T) * n);
- }
- static inline double powi(double base, int times) {
- double tmp = base, ret = 1.0;
- for (int t = times; t > 0; t /= 2) {
- if (t % 2 == 1) ret *= tmp;
- tmp = tmp * tmp;
- }
- return ret;
- }
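- // Worked trace (illustrative): powi(2.0, 10) squares tmp each pass and folds it
- // into ret whenever t is odd; t runs 10 -> 5 -> 2 -> 1:
- //   after t=10: ret=1     tmp=4
- //   after t=5:  ret=4     tmp=16
- //   after t=2:  ret=4     tmp=256
- //   after t=1:  ret=1024  tmp=65536
- // so powi(2.0, 10) == 1024, using four squarings instead of nine multiplications.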
- #define INF HUGE_VAL
- #define TAU 1e-12
- #define Malloc(type, n) (type *)malloc((n)*sizeof(type))
- static void print_string_stdout(const char *s) {
- fputs(s, stdout);
- fflush(stdout);
- }
- static void (*svm_print_string)(const char *) = &print_string_stdout;
- #if 1
- static void info(const char *fmt, ...) {
- char buf[BUFSIZ];
- va_list ap;
- va_start(ap, fmt);
- vsprintf(buf, fmt, ap);
- va_end(ap);
- (*svm_print_string)(buf);
- }
- #else
- static void info(const char *fmt,...) {}
- #endif
- //
- // Kernel Cache
- //
- // l is the number of total data items
- // size is the cache size limit in bytes
- //
- class Cache {
- public:
- Cache(int l, long int size);
- ~Cache();
- // request data [0,len)
- // return some position p where [p,len) need to be filled
- // (p >= len if nothing needs to be filled)
- int get_data(const int index, Qfloat **data, int len);
- void swap_index(int i, int j);
- private:
- int l;
- long int size;
- struct head_t {
- head_t *prev, *next; // a circular list
- Qfloat *data;
- int len; // data[0,len) is cached in this entry
- };
- head_t *head;
- head_t lru_head;
- void lru_delete(head_t *h);
- void lru_insert(head_t *h);
- };
- Cache::Cache(int l_, long int size_) : l(l_), size(size_) {
- head = (head_t *) calloc(l, sizeof(head_t)); // initialized to 0
- size /= sizeof(Qfloat);
- size -= l * sizeof(head_t) / sizeof(Qfloat);
- size = max(size, 2 * (long int) l); // cache must be large enough for two columns
- lru_head.next = lru_head.prev = &lru_head;
- }
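- // (Descriptive note: callers pass the budget in bytes, e.g. SVC_Q below uses
- // param.cache_size*(1<<20) for a size given in MB. The constructor rescales it
- // to a count of Qfloat elements, charges the head_t array against that budget,
- // and clamps it to at least 2*l so two full columns always fit.)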
- Cache::~Cache() {
- for (head_t *h = lru_head.next; h != &lru_head; h = h->next)
- free(h->data);
- free(head);
- }
- void Cache::lru_delete(head_t *h) {
- // delete from current location
- h->prev->next = h->next;
- h->next->prev = h->prev;
- }
- void Cache::lru_insert(head_t *h) {
- // insert to last position
- h->next = &lru_head;
- h->prev = lru_head.prev;
- h->prev->next = h;
- h->next->prev = h;
- }
- int Cache::get_data(const int index, Qfloat **data, int len) {
- head_t *h = &head[index];
- if (h->len) lru_delete(h);
- int more = len - h->len;
- if (more > 0) {
- // free old space
- while (size < more) {
- head_t *old = lru_head.next;
- lru_delete(old);
- free(old->data);
- size += old->len;
- old->data = 0;
- old->len = 0;
- }
- // allocate new space
- h->data = (Qfloat *) realloc(h->data, sizeof(Qfloat) * len);
- size -= more;
- swap(h->len, len);
- }
- lru_insert(h);
- *data = h->data;
- return len;
- }
- void Cache::swap_index(int i, int j) {
- if (i == j) return;
- if (head[i].len) lru_delete(&head[i]);
- if (head[j].len) lru_delete(&head[j]);
- swap(head[i].data, head[j].data);
- swap(head[i].len, head[j].len);
- if (head[i].len) lru_insert(&head[i]);
- if (head[j].len) lru_insert(&head[j]);
- if (i > j) swap(i, j);
- for (head_t *h = lru_head.next; h != &lru_head; h = h->next) {
- if (h->len > i) {
- if (h->len > j)
- swap(h->data[i], h->data[j]);
- else {
- // give up
- lru_delete(h);
- free(h->data);
- size += h->len;
- h->data = 0;
- h->len = 0;
- }
- }
- }
- }
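- // Usage sketch (illustrative, mirroring ONE_CLASS_Q::get_Q further below): a
- // caller requests column i and fills only the part that was not already cached:
- //
- //   Qfloat *col;
- //   int start = cache->get_data(i, &col, len);  // [start,len) must be filled
- //   for (int j = start; j < len; j++)
- //       col[j] = (Qfloat)(this->*kernel_function)(i, j);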
- //
- // Kernel evaluation
- //
- // the static method k_function is for doing single kernel evaluation
- // the constructor of Kernel prepares to calculate the l*l kernel matrix
- // the member function get_Q is for getting one column from the Q Matrix
- //
- class QMatrix {
- public:
- virtual Qfloat *get_Q(int column, int len) const = 0;
- virtual double *get_QD() const = 0;
- virtual void swap_index(int i, int j) const = 0;
- virtual ~QMatrix() {}
- };
- class Kernel : public QMatrix {
- public:
- Kernel(int l, svm_node *const *x, const svm_parameter &param);
- virtual ~Kernel();
- static double k_function(const svm_node *x, const svm_node *y,
- const svm_parameter &param);
- virtual Qfloat *get_Q(int column, int len) const = 0;
- virtual double *get_QD() const = 0;
- virtual void swap_index(int i, int j) const // not so const...
- {
- swap(x[i], x[j]);
- if (x_square) swap(x_square[i], x_square[j]);
- }
- protected:
- double (Kernel::*kernel_function)(int i, int j) const;
- private:
- const svm_node **x;
- double *x_square;
- // svm_parameter
- const int kernel_type;
- const int degree;
- const double gamma;
- const double coef0;
- static double dot(const svm_node *px, const svm_node *py);
- double kernel_linear(int i, int j) const {
- return dot(x[i], x[j]);
- }
- double kernel_poly(int i, int j) const {
- return powi(gamma * dot(x[i], x[j]) + coef0, degree);
- }
- double kernel_rbf(int i, int j) const {
- return exp(-gamma * (x_square[i] + x_square[j] - 2 * dot(x[i], x[j])));
- }
- double kernel_sigmoid(int i, int j) const {
- return tanh(gamma * dot(x[i], x[j]) + coef0);
- }
- double kernel_precomputed(int i, int j) const {
- return x[i][(int) (x[j][0].value)].value;
- }
- };
- Kernel::Kernel(int l, svm_node *const *x_, const svm_parameter &param)
- : kernel_type(param.kernel_type), degree(param.degree),
- gamma(param.gamma), coef0(param.coef0) {
- switch (kernel_type) {
- case LINEAR:
- kernel_function = &Kernel::kernel_linear;
- break;
- case POLY:
- kernel_function = &Kernel::kernel_poly;
- break;
- case RBF:
- kernel_function = &Kernel::kernel_rbf;
- break;
- case SIGMOID:
- kernel_function = &Kernel::kernel_sigmoid;
- break;
- case PRECOMPUTED:
- kernel_function = &Kernel::kernel_precomputed;
- break;
- }
- clone(x, x_, l);
- if (kernel_type == RBF) {
- x_square = new double[l];
- for (int i = 0; i < l; i++)
- x_square[i] = dot(x[i], x[i]);
- } else
- x_square = 0;
- }
- Kernel::~Kernel() {
- delete[] x;
- delete[] x_square;
- }
- double Kernel::dot(const svm_node *px, const svm_node *py) {
- double sum = 0;
- while (px->index != -1 && py->index != -1) {
- if (px->index == py->index) {
- sum += px->value * py->value;
- ++px;
- ++py;
- } else {
- if (px->index > py->index)
- ++py;
- else
- ++px;
- }
- }
- return sum;
- }
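- // Worked example (illustrative): dot() walks two index-sorted sparse vectors,
- // multiplying only matching indices and stopping at the -1 sentinel. For
- //   px = 1:2.0 3:1.0   and   py = 3:4.0 4:5.0
- // only index 3 matches, so dot(px, py) = 1.0 * 4.0 = 4.0.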
- double Kernel::k_function(const svm_node *x, const svm_node *y, const svm_parameter &param) {
- switch (param.kernel_type) {
- case LINEAR:
- return dot(x, y);
- case POLY:
- return powi(param.gamma * dot(x, y) + param.coef0, param.degree);
- case RBF: {
- double sum = 0;
- while (x->index != -1 && y->index != -1) {
- if (x->index == y->index) {
- double d = x->value - y->value;
- sum += d * d;
- ++x;
- ++y;
- } else {
- if (x->index > y->index) {
- sum += y->value * y->value;
- ++y;
- } else {
- sum += x->value * x->value;
- ++x;
- }
- }
- }
- while (x->index != -1) {
- sum += x->value * x->value;
- ++x;
- }
- while (y->index != -1) {
- sum += y->value * y->value;
- ++y;
- }
- return exp(-param.gamma * sum);
- }
- case SIGMOID:
- return tanh(param.gamma * dot(x, y) + param.coef0);
- case PRECOMPUTED: //x: test (validation), y: SV
- return x[(int) (y->value)].value;
- default:
- return 0; // Unreachable
- }
- }
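- // Worked example (illustrative values, not library code): with
- // param.kernel_type == RBF and param.gamma == 0.5, for the sparse vectors
- //   x = 1:1.0 3:2.0   and   y = 1:0.5 2:1.0
- // the RBF branch accumulates ||x-y||^2 = (1.0-0.5)^2 + 1.0^2 + 2.0^2 = 5.25
- // and k_function(x, y, param) returns exp(-0.5 * 5.25).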
- // An SMO algorithm in Fan et al., JMLR 6(2005), p. 1889--1918
- // Solves:
- //
- // min 0.5(\alpha^T Q \alpha) + p^T \alpha
- //
- // y^T \alpha = \delta
- // y_i = +1 or -1
- // 0 <= alpha_i <= Cp for y_i = 1
- // 0 <= alpha_i <= Cn for y_i = -1
- //
- // Given:
- //
- // Q, p, y, Cp, Cn, and an initial feasible point \alpha
- // l is the size of vectors and matrices
- // eps is the stopping tolerance
- //
- // solution will be put in \alpha, objective value will be put in obj
- //
- class Solver {
- public:
- Solver() {};
- virtual ~Solver() {};
- struct SolutionInfo {
- double obj;
- double rho;
- double upper_bound_p;
- double upper_bound_n;
- double r; // for Solver_NU
- };
- void Solve(int l, const QMatrix &Q, const double *p_, const schar *y_,
- double *alpha_, double Cp, double Cn, double eps,
- SolutionInfo *si, int shrinking);
- protected:
- int active_size;
- schar *y;
- double *G; // gradient of objective function
- enum {
- LOWER_BOUND, UPPER_BOUND, FREE
- };
- char *alpha_status; // LOWER_BOUND, UPPER_BOUND, FREE
- double *alpha;
- const QMatrix *Q;
- const double *QD;
- double eps;
- double Cp, Cn;
- double *p;
- int *active_set;
- double *G_bar; // gradient, if we treat free variables as 0
- int l;
- bool unshrink; // XXX
- double get_C(int i) {
- return (y[i] > 0) ? Cp : Cn;
- }
- void update_alpha_status(int i) {
- if (alpha[i] >= get_C(i))
- alpha_status[i] = UPPER_BOUND;
- else if (alpha[i] <= 0)
- alpha_status[i] = LOWER_BOUND;
- else alpha_status[i] = FREE;
- }
- bool is_upper_bound(int i) { return alpha_status[i] == UPPER_BOUND; }
- bool is_lower_bound(int i) { return alpha_status[i] == LOWER_BOUND; }
- bool is_free(int i) { return alpha_status[i] == FREE; }
- void swap_index(int i, int j);
- void reconstruct_gradient();
- virtual int select_working_set(int &i, int &j);
- virtual double calculate_rho();
- virtual void do_shrinking();
- private:
- bool be_shrunk(int i, double Gmax1, double Gmax2);
- };
- void Solver::swap_index(int i, int j) {
- Q->swap_index(i, j);
- swap(y[i], y[j]);
- swap(G[i], G[j]);
- swap(alpha_status[i], alpha_status[j]);
- swap(alpha[i], alpha[j]);
- swap(p[i], p[j]);
- swap(active_set[i], active_set[j]);
- swap(G_bar[i], G_bar[j]);
- }
- void Solver::reconstruct_gradient() {
- // reconstruct inactive elements of G from G_bar and free variables
- if (active_size == l) return;
- int i, j;
- int nr_free = 0;
- for (j = active_size; j < l; j++)
- G[j] = G_bar[j] + p[j];
- for (j = 0; j < active_size; j++)
- if (is_free(j))
- nr_free++;
- if (2 * nr_free < active_size)
- info("\nWARNING: using -h 0 may be faster\n");
- if (nr_free * l > 2 * active_size * (l - active_size)) {
- for (i = active_size; i < l; i++) {
- const Qfloat *Q_i = Q->get_Q(i, active_size);
- for (j = 0; j < active_size; j++)
- if (is_free(j))
- G[i] += alpha[j] * Q_i[j];
- }
- } else {
- for (i = 0; i < active_size; i++)
- if (is_free(i)) {
- const Qfloat *Q_i = Q->get_Q(i, l);
- double alpha_i = alpha[i];
- for (j = active_size; j < l; j++)
- G[j] += alpha_i * Q_i[j];
- }
- }
- }
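- // (Descriptive note: the first branch above computes (l-active_size) kernel
- // columns of length active_size, the second nr_free columns of length l; the
- // test simply picks whichever needs fewer kernel evaluations, with the factor
- // of 2 biasing the choice toward the second branch.)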
- void Solver::Solve(int l, const QMatrix &Q, const double *p_, const schar *y_,
- double *alpha_, double Cp, double Cn, double eps,
- SolutionInfo *si, int shrinking) {
- this->l = l;
- this->Q = &Q;
- QD = Q.get_QD();
- clone(p, p_, l);
- clone(y, y_, l);
- clone(alpha, alpha_, l);
- this->Cp = Cp;
- this->Cn = Cn;
- this->eps = eps;
- unshrink = false;
- // initialize alpha_status
- {
- alpha_status = new char[l];
- for (int i = 0; i < l; i++)
- update_alpha_status(i);
- }
- // initialize active set (for shrinking)
- {
- active_set = new int[l];
- for (int i = 0; i < l; i++)
- active_set[i] = i;
- active_size = l;
- }
- // initialize gradient
- {
- G = new double[l];
- G_bar = new double[l];
- int i;
- for (i = 0; i < l; i++) {
- G[i] = p[i];
- G_bar[i] = 0;
- }
- for (i = 0; i < l; i++)
- if (!is_lower_bound(i)) {
- const Qfloat *Q_i = Q.get_Q(i, l);
- double alpha_i = alpha[i];
- int j;
- for (j = 0; j < l; j++)
- G[j] += alpha_i * Q_i[j];
- if (is_upper_bound(i))
- for (j = 0; j < l; j++)
- G_bar[j] += get_C(i) * Q_i[j];
- }
- }
- // optimization step
- int iter = 0;
- int max_iter = max(10000000, l > INT_MAX / 100 ? INT_MAX : 100 * l);
- int counter = min(l, 1000) + 1;
- while (iter < max_iter) {
- // show progress and do shrinking
- if (--counter == 0) {
- counter = min(l, 1000);
- if (shrinking) do_shrinking();
- info(".");
- }
- int i, j;
- if (select_working_set(i, j) != 0) {
- // reconstruct the whole gradient
- reconstruct_gradient();
- // reset active set size and check
- active_size = l;
- info("*");
- if (select_working_set(i, j) != 0)
- break;
- else
- counter = 1; // do shrinking next iteration
- }
- ++iter;
- // update alpha[i] and alpha[j], handle bounds carefully
- const Qfloat *Q_i = Q.get_Q(i, active_size);
- const Qfloat *Q_j = Q.get_Q(j, active_size);
- double C_i = get_C(i);
- double C_j = get_C(j);
- double old_alpha_i = alpha[i];
- double old_alpha_j = alpha[j];
- if (y[i] != y[j]) {
- double quad_coef = QD[i] + QD[j] + 2 * Q_i[j];
- if (quad_coef <= 0)
- quad_coef = TAU;
- double delta = (-G[i] - G[j]) / quad_coef;
- double diff = alpha[i] - alpha[j];
- alpha[i] += delta;
- alpha[j] += delta;
- if (diff > 0) {
- if (alpha[j] < 0) {
- alpha[j] = 0;
- alpha[i] = diff;
- }
- } else {
- if (alpha[i] < 0) {
- alpha[i] = 0;
- alpha[j] = -diff;
- }
- }
- if (diff > C_i - C_j) {
- if (alpha[i] > C_i) {
- alpha[i] = C_i;
- alpha[j] = C_i - diff;
- }
- } else {
- if (alpha[j] > C_j) {
- alpha[j] = C_j;
- alpha[i] = C_j + diff;
- }
- }
- } else {
- double quad_coef = QD[i] + QD[j] - 2 * Q_i[j];
- if (quad_coef <= 0)
- quad_coef = TAU;
- double delta = (G[i] - G[j]) / quad_coef;
- double sum = alpha[i] + alpha[j];
- alpha[i] -= delta;
- alpha[j] += delta;
- if (sum > C_i) {
- if (alpha[i] > C_i) {
- alpha[i] = C_i;
- alpha[j] = sum - C_i;
- }
- } else {
- if (alpha[j] < 0) {
- alpha[j] = 0;
- alpha[i] = sum;
- }
- }
- if (sum > C_j) {
- if (alpha[j] > C_j) {
- alpha[j] = C_j;
- alpha[i] = sum - C_j;
- }
- } else {
- if (alpha[i] < 0) {
- alpha[i] = 0;
- alpha[j] = sum;
- }
- }
- }
- // update G
- double delta_alpha_i = alpha[i] - old_alpha_i;
- double delta_alpha_j = alpha[j] - old_alpha_j;
- for (int k = 0; k < active_size; k++) {
- G[k] += Q_i[k] * delta_alpha_i + Q_j[k] * delta_alpha_j;
- }
- // update alpha_status and G_bar
- {
- bool ui = is_upper_bound(i);
- bool uj = is_upper_bound(j);
- update_alpha_status(i);
- update_alpha_status(j);
- int k;
- if (ui != is_upper_bound(i)) {
- Q_i = Q.get_Q(i, l);
- if (ui)
- for (k = 0; k < l; k++)
- G_bar[k] -= C_i * Q_i[k];
- else
- for (k = 0; k < l; k++)
- G_bar[k] += C_i * Q_i[k];
- }
- if (uj != is_upper_bound(j)) {
- Q_j = Q.get_Q(j, l);
- if (uj)
- for (k = 0; k < l; k++)
- G_bar[k] -= C_j * Q_j[k];
- else
- for (k = 0; k < l; k++)
- G_bar[k] += C_j * Q_j[k];
- }
- }
- }
- if (iter >= max_iter) {
- if (active_size < l) {
- // reconstruct the whole gradient to calculate objective value
- reconstruct_gradient();
- active_size = l;
- info("*");
- }
- fprintf(stderr, "\nWARNING: reaching max number of iterations\n");
- }
- // calculate rho
- si->rho = calculate_rho();
- // calculate objective value
- {
- double v = 0;
- int i;
- for (i = 0; i < l; i++)
- v += alpha[i] * (G[i] + p[i]);
- si->obj = v / 2;
- }
- // put back the solution
- {
- for (int i = 0; i < l; i++)
- alpha_[active_set[i]] = alpha[i];
- }
- // juggle everything back
- /*{
- for(int i=0;i<l;i++)
- while(active_set[i] != i)
- swap_index(i,active_set[i]);
- // or Q.swap_index(i,active_set[i]);
- }*/
- si->upper_bound_p = Cp;
- si->upper_bound_n = Cn;
- info("\noptimization finished, #iter = %d\n", iter);
- delete[] p;
- delete[] y;
- delete[] alpha;
- delete[] alpha_status;
- delete[] active_set;
- delete[] G;
- delete[] G_bar;
- }
- // return 1 if already optimal, return 0 otherwise
- int Solver::select_working_set(int &out_i, int &out_j) {
- // return i,j such that
- // i: maximizes -y_i * grad(f)_i, i in I_up(\alpha)
- // j: minimizes the decrease of obj value
- // (if quadratic coefficient <= 0, replace it with tau)
- // -y_j*grad(f)_j < -y_i*grad(f)_i, j in I_low(\alpha)
- double Gmax = -INF;
- double Gmax2 = -INF;
- int Gmax_idx = -1;
- int Gmin_idx = -1;
- double obj_diff_min = INF;
- for (int t = 0; t < active_size; t++)
- if (y[t] == +1) {
- if (!is_upper_bound(t))
- if (-G[t] >= Gmax) {
- Gmax = -G[t];
- Gmax_idx = t;
- }
- } else {
- if (!is_lower_bound(t))
- if (G[t] >= Gmax) {
- Gmax = G[t];
- Gmax_idx = t;
- }
- }
- int i = Gmax_idx;
- const Qfloat *Q_i = NULL;
- if (i != -1) // NULL Q_i not accessed: Gmax=-INF if i=-1
- Q_i = Q->get_Q(i, active_size);
- for (int j = 0; j < active_size; j++) {
- if (y[j] == +1) {
- if (!is_lower_bound(j)) {
- double grad_diff = Gmax + G[j];
- if (G[j] >= Gmax2)
- Gmax2 = G[j];
- if (grad_diff > 0) {
- double obj_diff;
- double quad_coef = QD[i] + QD[j] - 2.0 * y[i] * Q_i[j];
- if (quad_coef > 0)
- obj_diff = -(grad_diff * grad_diff) / quad_coef;
- else
- obj_diff = -(grad_diff * grad_diff) / TAU;
- if (obj_diff <= obj_diff_min) {
- Gmin_idx = j;
- obj_diff_min = obj_diff;
- }
- }
- }
- } else {
- if (!is_upper_bound(j)) {
- double grad_diff = Gmax - G[j];
- if (-G[j] >= Gmax2)
- Gmax2 = -G[j];
- if (grad_diff > 0) {
- double obj_diff;
- double quad_coef = QD[i] + QD[j] + 2.0 * y[i] * Q_i[j];
- if (quad_coef > 0)
- obj_diff = -(grad_diff * grad_diff) / quad_coef;
- else
- obj_diff = -(grad_diff * grad_diff) / TAU;
- if (obj_diff <= obj_diff_min) {
- Gmin_idx = j;
- obj_diff_min = obj_diff;
- }
- }
- }
- }
- }
- if (Gmax + Gmax2 < eps || Gmin_idx == -1)
- return 1;
- out_i = Gmax_idx;
- out_j = Gmin_idx;
- return 0;
- }
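- // (Descriptive note: this is the second-order working-set selection of Fan et
- // al., JMLR 6(2005). For each candidate j, -(grad_diff)^2/quad_coef estimates
- // the change in the objective from optimizing the pair (i,j); the j with the
- // largest predicted decrease, i.e. the smallest obj_diff, is selected.)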
- bool Solver::be_shrunk(int i, double Gmax1, double Gmax2) {
- if (is_upper_bound(i)) {
- if (y[i] == +1)
- return (-G[i] > Gmax1);
- else
- return (-G[i] > Gmax2);
- } else if (is_lower_bound(i)) {
- if (y[i] == +1)
- return (G[i] > Gmax2);
- else
- return (G[i] > Gmax1);
- } else
- return (false);
- }
- void Solver::do_shrinking() {
- int i;
- double Gmax1 = -INF; // max { -y_i * grad(f)_i | i in I_up(\alpha) }
- double Gmax2 = -INF; // max { y_i * grad(f)_i | i in I_low(\alpha) }
- // find maximal violating pair first
- for (i = 0; i < active_size; i++) {
- if (y[i] == +1) {
- if (!is_upper_bound(i)) {
- if (-G[i] >= Gmax1)
- Gmax1 = -G[i];
- }
- if (!is_lower_bound(i)) {
- if (G[i] >= Gmax2)
- Gmax2 = G[i];
- }
- } else {
- if (!is_upper_bound(i)) {
- if (-G[i] >= Gmax2)
- Gmax2 = -G[i];
- }
- if (!is_lower_bound(i)) {
- if (G[i] >= Gmax1)
- Gmax1 = G[i];
- }
- }
- }
- if (unshrink == false && Gmax1 + Gmax2 <= eps * 10) {
- unshrink = true;
- reconstruct_gradient();
- active_size = l;
- info("*");
- }
- for (i = 0; i < active_size; i++)
- if (be_shrunk(i, Gmax1, Gmax2)) {
- active_size--;
- while (active_size > i) {
- if (!be_shrunk(active_size, Gmax1, Gmax2)) {
- swap_index(i, active_size);
- break;
- }
- active_size--;
- }
- }
- }
- double Solver::calculate_rho() {
- double r;
- int nr_free = 0;
- double ub = INF, lb = -INF, sum_free = 0;
- for (int i = 0; i < active_size; i++) {
- double yG = y[i] * G[i];
- if (is_upper_bound(i)) {
- if (y[i] == -1)
- ub = min(ub, yG);
- else
- lb = max(lb, yG);
- } else if (is_lower_bound(i)) {
- if (y[i] == +1)
- ub = min(ub, yG);
- else
- lb = max(lb, yG);
- } else {
- ++nr_free;
- sum_free += yG;
- }
- }
- if (nr_free > 0)
- r = sum_free / nr_free;
- else
- r = (ub + lb) / 2;
- return r;
- }
- //
- // Solver for nu-svm classification and regression
- //
- // additional constraint: e^T \alpha = constant
- //
- class Solver_NU : public Solver {
- public:
- Solver_NU() {}
- void Solve(int l, const QMatrix &Q, const double *p, const schar *y,
- double *alpha, double Cp, double Cn, double eps,
- SolutionInfo *si, int shrinking) {
- this->si = si;
- Solver::Solve(l, Q, p, y, alpha, Cp, Cn, eps, si, shrinking);
- }
- private:
- SolutionInfo *si;
- int select_working_set(int &i, int &j);
- double calculate_rho();
- bool be_shrunk(int i, double Gmax1, double Gmax2, double Gmax3, double Gmax4);
- void do_shrinking();
- };
- // return 1 if already optimal, return 0 otherwise
- int Solver_NU::select_working_set(int &out_i, int &out_j) {
- // return i,j such that y_i = y_j and
- // i: maximizes -y_i * grad(f)_i, i in I_up(\alpha)
- // j: minimizes the decrease of obj value
- // (if quadratic coefficient <= 0, replace it with tau)
- // -y_j*grad(f)_j < -y_i*grad(f)_i, j in I_low(\alpha)
- double Gmaxp = -INF;
- double Gmaxp2 = -INF;
- int Gmaxp_idx = -1;
- double Gmaxn = -INF;
- double Gmaxn2 = -INF;
- int Gmaxn_idx = -1;
- int Gmin_idx = -1;
- double obj_diff_min = INF;
- for (int t = 0; t < active_size; t++)
- if (y[t] == +1) {
- if (!is_upper_bound(t))
- if (-G[t] >= Gmaxp) {
- Gmaxp = -G[t];
- Gmaxp_idx = t;
- }
- } else {
- if (!is_lower_bound(t))
- if (G[t] >= Gmaxn) {
- Gmaxn = G[t];
- Gmaxn_idx = t;
- }
- }
- int ip = Gmaxp_idx;
- int in = Gmaxn_idx;
- const Qfloat *Q_ip = NULL;
- const Qfloat *Q_in = NULL;
- if (ip != -1) // NULL Q_ip not accessed: Gmaxp=-INF if ip=-1
- Q_ip = Q->get_Q(ip, active_size);
- if (in != -1)
- Q_in = Q->get_Q(in, active_size);
- for (int j = 0; j < active_size; j++) {
- if (y[j] == +1) {
- if (!is_lower_bound(j)) {
- double grad_diff = Gmaxp + G[j];
- if (G[j] >= Gmaxp2)
- Gmaxp2 = G[j];
- if (grad_diff > 0) {
- double obj_diff;
- double quad_coef = QD[ip] + QD[j] - 2 * Q_ip[j];
- if (quad_coef > 0)
- obj_diff = -(grad_diff * grad_diff) / quad_coef;
- else
- obj_diff = -(grad_diff * grad_diff) / TAU;
- if (obj_diff <= obj_diff_min) {
- Gmin_idx = j;
- obj_diff_min = obj_diff;
- }
- }
- }
- } else {
- if (!is_upper_bound(j)) {
- double grad_diff = Gmaxn - G[j];
- if (-G[j] >= Gmaxn2)
- Gmaxn2 = -G[j];
- if (grad_diff > 0) {
- double obj_diff;
- double quad_coef = QD[in] + QD[j] - 2 * Q_in[j];
- if (quad_coef > 0)
- obj_diff = -(grad_diff * grad_diff) / quad_coef;
- else
- obj_diff = -(grad_diff * grad_diff) / TAU;
- if (obj_diff <= obj_diff_min) {
- Gmin_idx = j;
- obj_diff_min = obj_diff;
- }
- }
- }
- }
- }
- if (max(Gmaxp + Gmaxp2, Gmaxn + Gmaxn2) < eps || Gmin_idx == -1)
- return 1;
- if (y[Gmin_idx] == +1)
- out_i = Gmaxp_idx;
- else
- out_i = Gmaxn_idx;
- out_j = Gmin_idx;
- return 0;
- }
- bool Solver_NU::be_shrunk(int i, double Gmax1, double Gmax2, double Gmax3, double Gmax4) {
- if (is_upper_bound(i)) {
- if (y[i] == +1)
- return (-G[i] > Gmax1);
- else
- return (-G[i] > Gmax4);
- } else if (is_lower_bound(i)) {
- if (y[i] == +1)
- return (G[i] > Gmax2);
- else
- return (G[i] > Gmax3);
- } else
- return (false);
- }
- void Solver_NU::do_shrinking() {
- double Gmax1 = -INF; // max { -y_i * grad(f)_i | y_i = +1, i in I_up(\alpha) }
- double Gmax2 = -INF; // max { y_i * grad(f)_i | y_i = +1, i in I_low(\alpha) }
- double Gmax3 = -INF; // max { -y_i * grad(f)_i | y_i = -1, i in I_up(\alpha) }
- double Gmax4 = -INF; // max { y_i * grad(f)_i | y_i = -1, i in I_low(\alpha) }
- // find maximal violating pair first
- int i;
- for (i = 0; i < active_size; i++) {
- if (!is_upper_bound(i)) {
- if (y[i] == +1) {
- if (-G[i] > Gmax1) Gmax1 = -G[i];
- } else if (-G[i] > Gmax4) Gmax4 = -G[i];
- }
- if (!is_lower_bound(i)) {
- if (y[i] == +1) {
- if (G[i] > Gmax2) Gmax2 = G[i];
- } else if (G[i] > Gmax3) Gmax3 = G[i];
- }
- }
- if (unshrink == false && max(Gmax1 + Gmax2, Gmax3 + Gmax4) <= eps * 10) {
- unshrink = true;
- reconstruct_gradient();
- active_size = l;
- }
- for (i = 0; i < active_size; i++)
- if (be_shrunk(i, Gmax1, Gmax2, Gmax3, Gmax4)) {
- active_size--;
- while (active_size > i) {
- if (!be_shrunk(active_size, Gmax1, Gmax2, Gmax3, Gmax4)) {
- swap_index(i, active_size);
- break;
- }
- active_size--;
- }
- }
- }
- double Solver_NU::calculate_rho() {
- int nr_free1 = 0, nr_free2 = 0;
- double ub1 = INF, ub2 = INF;
- double lb1 = -INF, lb2 = -INF;
- double sum_free1 = 0, sum_free2 = 0;
- for (int i = 0; i < active_size; i++) {
- if (y[i] == +1) {
- if (is_upper_bound(i))
- lb1 = max(lb1, G[i]);
- else if (is_lower_bound(i))
- ub1 = min(ub1, G[i]);
- else {
- ++nr_free1;
- sum_free1 += G[i];
- }
- } else {
- if (is_upper_bound(i))
- lb2 = max(lb2, G[i]);
- else if (is_lower_bound(i))
- ub2 = min(ub2, G[i]);
- else {
- ++nr_free2;
- sum_free2 += G[i];
- }
- }
- }
- double r1, r2;
- if (nr_free1 > 0)
- r1 = sum_free1 / nr_free1;
- else
- r1 = (ub1 + lb1) / 2;
- if (nr_free2 > 0)
- r2 = sum_free2 / nr_free2;
- else
- r2 = (ub2 + lb2) / 2;
- si->r = (r1 + r2) / 2;
- return (r1 - r2) / 2;
- }
- //
- // Q matrices for various formulations
- //
- class SVC_Q : public Kernel {
- public:
- SVC_Q(const svm_problem &prob, const svm_parameter &param, const schar *y_)
- : Kernel(prob.l, prob.x, param) {
- clone(y, y_, prob.l);
- cache = new Cache(prob.l, (long int) (param.cache_size * (1 << 20)));
- QD = new double[prob.l];
- for (int i = 0; i < prob.l; i++)
- QD[i] = (this->*kernel_function)(i, i);
- }
- Qfloat *get_Q(int i, int len) const {
- Qfloat *data;
- int start, j;
- if ((start = cache->get_data(i, &data, len)) < len) {
- #ifdef _OPENMP
- #pragma omp parallel for private(j) schedule(guided)
- #endif
- for (j = start; j < len; j++)
- data[j] = (Qfloat) (y[i] * y[j] * (this->*kernel_function)(i, j));
- }
- return data;
- }
- double *get_QD() const {
- return QD;
- }
- void swap_index(int i, int j) const {
- cache->swap_index(i, j);
- Kernel::swap_index(i, j);
- swap(y[i], y[j]);
- swap(QD[i], QD[j]);
- }
- ~SVC_Q() {
- delete[] y;
- delete cache;
- delete[] QD;
- }
- private:
- schar *y;
- Cache *cache;
- double *QD;
- };
- class ONE_CLASS_Q : public Kernel {
- public:
- ONE_CLASS_Q(const svm_problem &prob, const svm_parameter &param)
- : Kernel(prob.l, prob.x, param) {
- cache = new Cache(prob.l, (long int) (param.cache_size * (1 << 20)));
- QD = new double[prob.l];
- for (int i = 0; i < prob.l; i++)
- QD[i] = (this->*kernel_function)(i, i);
- }
- Qfloat *get_Q(int i, int len) const {
- Qfloat *data;
- int start, j;
- if ((start = cache->get_data(i, &data, len)) < len) {
- for (j = start; j < len; j++)
- data[j] = (Qfloat) (this->*kernel_function)(i, j);
- }
- return data;
- }
- double *get_QD() const {
- return QD;
- }
- void swap_index(int i, int j) const {
- cache->swap_index(i, j);
- Kernel::swap_index(i, j);
- swap(QD[i], QD[j]);
- }
- ~ONE_CLASS_Q() {
- delete cache;
- delete[] QD;
- }
- private:
- Cache *cache;
- double *QD;
- };
- class SVR_Q : public Kernel {
- public:
- SVR_Q(const svm_problem &prob, const svm_parameter &param)
- : Kernel(prob.l, prob.x, param) {
- l = prob.l;
- cache = new Cache(l, (long int) (param.cache_size * (1 << 20)));
- QD = new double[2 * l];
- sign = new schar[2 * l];
- index = new int[2 * l];
- for (int k = 0; k < l; k++) {
- sign[k] = 1;
- sign[k + l] = -1;
- index[k] = k;
- index[k + l] = k;
- QD[k] = (this->*kernel_function)(k, k);
- QD[k + l] = QD[k];
- }
- buffer[0] = new Qfloat[2 * l];
- buffer[1] = new Qfloat[2 * l];
- next_buffer = 0;
- }
- void swap_index(int i, int j) const {
- swap(sign[i], sign[j]);
- swap(index[i], index[j]);
- swap(QD[i], QD[j]);
- }
- Qfloat *get_Q(int i, int len) const {
- Qfloat *data;
- int j, real_i = index[i];
- if (cache->get_data(real_i, &data, l) < l) {
- #ifdef _OPENMP
- #pragma omp parallel for private(j) schedule(guided)
- #endif
- for (j = 0; j < l; j++)
- data[j] = (Qfloat) (this->*kernel_function)(real_i, j);
- }
- // reorder and copy
- Qfloat *buf = buffer[next_buffer];
- next_buffer = 1 - next_buffer;
- schar si = sign[i];
- for (j = 0; j < len; j++)
- buf[j] = (Qfloat) si * (Qfloat) sign[j] * data[index[j]];
- return buf;
- }
- double *get_QD() const {
- return QD;
- }
- ~SVR_Q() {
- delete cache;
- delete[] sign;
- delete[] index;
- delete[] buffer[0];
- delete[] buffer[1];
- delete[] QD;
- }
- private:
- int l;
- Cache *cache;
- schar *sign;
- int *index;
- mutable int next_buffer;
- Qfloat *buffer[2];
- double *QD;
- };
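- // Layout note (illustrative): SVR_Q expands the problem to 2*l variables, the
- // first block for alpha and the second for alpha*. For l = 3 the constructor
- // builds
- //   index = {0,1,2, 0,1,2}   sign = {+1,+1,+1, -1,-1,-1}
- // so the expanded matrix entry (i,j) is sign[i]*sign[j]*K(index[i], index[j]),
- // which is exactly what get_Q() assembles from one cached column of K.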
- //
- // construct and solve various formulations
- //
- static void solve_c_svc(
- const svm_problem *prob, const svm_parameter *param,
- double *alpha, Solver::SolutionInfo *si, double Cp, double Cn) {
- int l = prob->l;
- double *minus_ones = new double[l];
- schar *y = new schar[l];
- int i;
- for (i = 0; i < l; i++) {
- alpha[i] = 0;
- minus_ones[i] = -1;
- if (prob->y[i] > 0) y[i] = +1; else y[i] = -1;
- }
- Solver s;
- s.Solve(l, SVC_Q(*prob, *param, y), minus_ones, y,
- alpha, Cp, Cn, param->eps, si, param->shrinking);
- double sum_alpha = 0;
- for (i = 0; i < l; i++)
- sum_alpha += alpha[i];
- if (Cp == Cn)
- info("nu = %f\n", sum_alpha / (Cp * prob->l));
- for (i = 0; i < l; i++)
- alpha[i] *= y[i];
- delete[] minus_ones;
- delete[] y;
- }
- static void solve_nu_svc(
- const svm_problem *prob, const svm_parameter *param,
- double *alpha, Solver::SolutionInfo *si) {
- int i;
- int l = prob->l;
- double nu = param->nu;
- schar *y = new schar[l];
- for (i = 0; i < l; i++)
- if (prob->y[i] > 0)
- y[i] = +1;
- else
- y[i] = -1;
- double sum_pos = nu * l / 2;
- double sum_neg = nu * l / 2;
- for (i = 0; i < l; i++)
- if (y[i] == +1) {
- alpha[i] = min(1.0, sum_pos);
- sum_pos -= alpha[i];
- } else {
- alpha[i] = min(1.0, sum_neg);
- sum_neg -= alpha[i];
- }
- double *zeros = new double[l];
- for (i = 0; i < l; i++)
- zeros[i] = 0;
- Solver_NU s;
- s.Solve(l, SVC_Q(*prob, *param, y), zeros, y,
- alpha, 1.0, 1.0, param->eps, si, param->shrinking);
- double r = si->r;
- info("C = %f\n", 1 / r);
- for (i = 0; i < l; i++)
- alpha[i] *= y[i] / r;
- si->rho /= r;
- si->obj /= (r * r);
- si->upper_bound_p = 1 / r;
- si->upper_bound_n = 1 / r;
- delete[] y;
- delete[] zeros;
- }
- static void solve_one_class(
- const svm_problem *prob, const svm_parameter *param,
- double *alpha, Solver::SolutionInfo *si) {
- int l = prob->l;
- double *zeros = new double[l];
- schar *ones = new schar[l];
- int i;
- int n = (int) (param->nu * prob->l); // # of alpha's at upper bound
- for (i = 0; i < n; i++)
- alpha[i] = 1;
- if (n < prob->l)
- alpha[n] = param->nu * prob->l - n;
- for (i = n + 1; i < l; i++)
- alpha[i] = 0;
- for (i = 0; i < l; i++) {
- zeros[i] = 0;
- ones[i] = 1;
- }
- Solver s;
- s.Solve(l, ONE_CLASS_Q(*prob, *param), zeros, ones,
- alpha, 1.0, 1.0, param->eps, si, param->shrinking);
- delete[] zeros;
- delete[] ones;
- }
- static void solve_epsilon_svr(
- const svm_problem *prob, const svm_parameter *param,
- double *alpha, Solver::SolutionInfo *si) {
- int l = prob->l;
- double *alpha2 = new double[2 * l];
- double *linear_term = new double[2 * l];
- schar *y = new schar[2 * l];
- int i;
- for (i = 0; i < l; i++) {
- alpha2[i] = 0;
- linear_term[i] = param->p - prob->y[i];
- y[i] = 1;
- alpha2[i + l] = 0;
- linear_term[i + l] = param->p + prob->y[i];
- y[i + l] = -1;
- }
- Solver s;
- s.Solve(2 * l, SVR_Q(*prob, *param), linear_term, y,
- alpha2, param->C, param->C, param->eps, si, param->shrinking);
- double sum_alpha = 0;
- for (i = 0; i < l; i++) {
- alpha[i] = alpha2[i] - alpha2[i + l];
- sum_alpha += fabs(alpha[i]);
- }
- info("nu = %f\n", sum_alpha / (param->C * l));
- delete[] alpha2;
- delete[] linear_term;
- delete[] y;
- }
- static void solve_nu_svr(
- const svm_problem *prob, const svm_parameter *param,
- double *alpha, Solver::SolutionInfo *si) {
- int l = prob->l;
- double C = param->C;
- double *alpha2 = new double[2 * l];
- double *linear_term = new double[2 * l];
- schar *y = new schar[2 * l];
- int i;
- double sum = C * param->nu * l / 2;
- for (i = 0; i < l; i++) {
- alpha2[i] = alpha2[i + l] = min(sum, C);
- sum -= alpha2[i];
- linear_term[i] = -prob->y[i];
- y[i] = 1;
- linear_term[i + l] = prob->y[i];
- y[i + l] = -1;
- }
- Solver_NU s;
- s.Solve(2 * l, SVR_Q(*prob, *param), linear_term, y,
- alpha2, C, C, param->eps, si, param->shrinking);
- info("epsilon = %f\n", -si->r);
- for (i = 0; i < l; i++)
- alpha[i] = alpha2[i] - alpha2[i + l];
- delete[] alpha2;
- delete[] linear_term;
- delete[] y;
- }
- //
- // decision_function
- //
- struct decision_function {
- double *alpha;
- double rho;
- };
- static decision_function svm_train_one(
- const svm_problem *prob, const svm_parameter *param,
- double Cp, double Cn) {
- double *alpha = Malloc(double, prob->l);
- Solver::SolutionInfo si;
- switch (param->svm_type) {
- case C_SVC:
- solve_c_svc(prob, param, alpha, &si, Cp, Cn);
- break;
- case NU_SVC:
- solve_nu_svc(prob, param, alpha, &si);
- break;
- case ONE_CLASS:
- solve_one_class(prob, param, alpha, &si);
- break;
- case EPSILON_SVR:
- solve_epsilon_svr(prob, param, alpha, &si);
- break;
- case NU_SVR:
- solve_nu_svr(prob, param, alpha, &si);
- break;
- }
- info("obj = %f, rho = %f\n", si.obj, si.rho);
- // output SVs
- int nSV = 0;
- int nBSV = 0;
- for (int i = 0; i < prob->l; i++) {
- if (fabs(alpha[i]) > 0) {
- ++nSV;
- if (prob->y[i] > 0) {
- if (fabs(alpha[i]) >= si.upper_bound_p)
- ++nBSV;
- } else {
- if (fabs(alpha[i]) >= si.upper_bound_n)
- ++nBSV;
- }
- }
- }
- info("nSV = %d, nBSV = %d\n", nSV, nBSV);
- decision_function f;
- f.alpha = alpha;
- f.rho = si.rho;
- return f;
- }
- // Platt's binary SVM Probabilistic Output: an improvement from Lin et al.
- static void sigmoid_train(
- int l, const double *dec_values, const double *labels,
- double &A, double &B) {
- double prior1 = 0, prior0 = 0;
- int i;
- for (i = 0; i < l; i++)
- if (labels[i] > 0) prior1 += 1;
- else prior0 += 1;
- int max_iter = 100; // Maximal number of iterations
- double min_step = 1e-10; // Minimal step taken in line search
- double sigma = 1e-12; // For numerically strict PD of Hessian
- double eps = 1e-5;
- double hiTarget = (prior1 + 1.0) / (prior1 + 2.0);
- double loTarget = 1 / (prior0 + 2.0);
- double *t = Malloc(double, l);
- double fApB, p, q, h11, h22, h21, g1, g2, det, dA, dB, gd, stepsize;
- double newA, newB, newf, d1, d2;
- int iter;
- // Initial Point and Initial Fun Value
- A = 0.0;
- B = log((prior0 + 1.0) / (prior1 + 1.0));
- double fval = 0.0;
- for (i = 0; i < l; i++) {
- if (labels[i] > 0) t[i] = hiTarget;
- else t[i] = loTarget;
- fApB = dec_values[i] * A + B;
- if (fApB >= 0)
- fval += t[i] * fApB + log(1 + exp(-fApB));
- else
- fval += (t[i] - 1) * fApB + log(1 + exp(fApB));
- }
- for (iter = 0; iter < max_iter; iter++) {
- // Update Gradient and Hessian (use H' = H + sigma I)
- h11 = sigma; // numerically ensures strict PD
- h22 = sigma;
- h21 = 0.0;
- g1 = 0.0;
- g2 = 0.0;
- for (i = 0; i < l; i++) {
- fApB = dec_values[i] * A + B;
- if (fApB >= 0) {
- p = exp(-fApB) / (1.0 + exp(-fApB));
- q = 1.0 / (1.0 + exp(-fApB));
- } else {
- p = 1.0 / (1.0 + exp(fApB));
- q = exp(fApB) / (1.0 + exp(fApB));
- }
- d2 = p * q;
- h11 += dec_values[i] * dec_values[i] * d2;
- h22 += d2;
- h21 += dec_values[i] * d2;
- d1 = t[i] - p;
- g1 += dec_values[i] * d1;
- g2 += d1;
- }
- // Stopping Criteria
- if (fabs(g1) < eps && fabs(g2) < eps)
- break;
- // Finding Newton direction: -inv(H') * g
- det = h11 * h22 - h21 * h21;
- dA = -(h22 * g1 - h21 * g2) / det;
- dB = -(-h21 * g1 + h11 * g2) / det;
- gd = g1 * dA + g2 * dB;
- stepsize = 1; // Line Search
- while (stepsize >= min_step) {
- newA = A + stepsize * dA;
- newB = B + stepsize * dB;
- // New function value
- newf = 0.0;
- for (i = 0; i < l; i++) {
- fApB = dec_values[i] * newA + newB;
- if (fApB >= 0)
- newf += t[i] * fApB + log(1 + exp(-fApB));
- else
- newf += (t[i] - 1) * fApB + log(1 + exp(fApB));
- }
- // Check sufficient decrease
- if (newf < fval + 0.0001 * stepsize * gd) {
- A = newA;
- B = newB;
- fval = newf;
- break;
- } else
- stepsize = stepsize / 2.0;
- }
- if (stepsize < min_step) {
- info("Line search fails in two-class probability estimates\n");
- break;
- }
- }
- if (iter >= max_iter)
- info("Reaching maximal iterations in two-class probability estimates\n");
- free(t);
- }
- static double sigmoid_predict(double decision_value, double A, double B) {
- double fApB = decision_value * A + B;
- // 1-p used later; avoid catastrophic cancellation
- if (fApB >= 0)
- return exp(-fApB) / (1.0 + exp(-fApB));
- else
- return 1.0 / (1 + exp(fApB));
- }
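- // Worked example (illustrative numbers): with A = -1, B = 0 and a decision
- // value of 2, fApB = -2 < 0, so the function returns 1/(1+exp(-2)) ~= 0.88,
- // the Platt-scaled estimate that the instance belongs to the first class of
- // the pair.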
- // Method 2 from the multiclass_prob paper by Wu, Lin, and Weng to predict probabilities
- static void multiclass_probability(int k, double **r, double *p) {
- int t, j;
- int iter = 0, max_iter = max(100, k);
- double **Q = Malloc(double *, k);
- double *Qp = Malloc(double, k);
- double pQp, eps = 0.005 / k;
- for (t = 0; t < k; t++) {
- p[t] = 1.0 / k; // Valid if k = 1
- Q[t] = Malloc(double, k);
- Q[t][t] = 0;
- for (j = 0; j < t; j++) {
- Q[t][t] += r[j][t] * r[j][t];
- Q[t][j] = Q[j][t];
- }
- for (j = t + 1; j < k; j++) {
- Q[t][t] += r[j][t] * r[j][t];
- Q[t][j] = -r[j][t] * r[t][j];
- }
- }
- for (iter = 0; iter < max_iter; iter++) {
- // stopping condition, recalculate QP,pQP for numerical accuracy
- pQp = 0;
- for (t = 0; t < k; t++) {
- Qp[t] = 0;
- for (j = 0; j < k; j++)
- Qp[t] += Q[t][j] * p[j];
- pQp += p[t] * Qp[t];
- }
- double max_error = 0;
- for (t = 0; t < k; t++) {
- double error = fabs(Qp[t] - pQp);
- if (error > max_error)
- max_error = error;
- }
- if (max_error < eps) break;
- for (t = 0; t < k; t++) {
- double diff = (-Qp[t] + pQp) / Q[t][t];
- p[t] += diff;
- pQp = (pQp + diff * (diff * Q[t][t] + 2 * Qp[t])) / (1 + diff) / (1 + diff);
- for (j = 0; j < k; j++) {
- Qp[j] = (Qp[j] + diff * Q[t][j]) / (1 + diff);
- p[j] /= (1 + diff);
- }
- }
- }
- if (iter >= max_iter)
- info("Exceeds max_iter in multiclass_prob\n");
- for (t = 0; t < k; t++) free(Q[t]);
- free(Q);
- free(Qp);
- }
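- // Sanity check (illustrative): for k = 2 the objective p'Qp reduces to
- //   (p[0]*r[1][0] - p[1]*r[0][1])^2   with   p[0] + p[1] = 1,
- // whose minimizer is p[0] = r[0][1] and p[1] = r[1][0] when the pairwise
- // estimates sum to 1, so the iteration reproduces the binary sigmoid estimate.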
- // Using cross-validation decision values to get parameters for SVC probability estimates
- static void svm_binary_svc_probability(
- const svm_problem *prob, const svm_parameter *param,
- double Cp, double Cn, double &probA, double &probB) {
- int i;
- int nr_fold = 5;
- int *perm = Malloc(int, prob->l);
- double *dec_values = Malloc(double, prob->l);
- // random shuffle
- for (i = 0; i < prob->l; i++) perm[i] = i;
- for (i = 0; i < prob->l; i++) {
- int j = i + rand() % (prob->l - i);
- swap(perm[i], perm[j]);
- }
- for (i = 0; i < nr_fold; i++) {
- int begin = i * prob->l / nr_fold;
- int end = (i + 1) * prob->l / nr_fold;
- int j, k;
- struct svm_problem subprob;
- subprob.l = prob->l - (end - begin);
- subprob.x = Malloc(struct svm_node*, subprob.l);
- subprob.y = Malloc(double, subprob.l);
- k = 0;
- for (j = 0; j < begin; j++) {
- subprob.x[k] = prob->x[perm[j]];
- subprob.y[k] = prob->y[perm[j]];
- ++k;
- }
- for (j = end; j < prob->l; j++) {
- subprob.x[k] = prob->x[perm[j]];
- subprob.y[k] = prob->y[perm[j]];
- ++k;
- }
- int p_count = 0, n_count = 0;
- for (j = 0; j < k; j++)
- if (subprob.y[j] > 0)
- p_count++;
- else
- n_count++;
- if (p_count == 0 && n_count == 0)
- for (j = begin; j < end; j++)
- dec_values[perm[j]] = 0;
- else if (p_count > 0 && n_count == 0)
- for (j = begin; j < end; j++)
- dec_values[perm[j]] = 1;
- else if (p_count == 0 && n_count > 0)
- for (j = begin; j < end; j++)
- dec_values[perm[j]] = -1;
- else {
- svm_parameter subparam = *param;
- subparam.probability = 0;
- subparam.C = 1.0;
- subparam.nr_weight = 2;
- subparam.weight_label = Malloc(int, 2);
- subparam.weight = Malloc(double, 2);
- subparam.weight_label[0] = +1;
- subparam.weight_label[1] = -1;
- subparam.weight[0] = Cp;
- subparam.weight[1] = Cn;
- struct svm_model *submodel = svm_train(&subprob, &subparam);
- for (j = begin; j < end; j++) {
- svm_predict_values(submodel, prob->x[perm[j]], &(dec_values[perm[j]]));
- // ensure +1 -1 order; this is why the generic CV subroutine is not used here
- dec_values[perm[j]] *= submodel->label[0];
- }
- svm_free_and_destroy_model(&submodel);
- svm_destroy_param(&subparam);
- }
- free(subprob.x);
- free(subprob.y);
- }
- sigmoid_train(prob->l, dec_values, prob->y, probA, probB);
- free(dec_values);
- free(perm);
- }
- // Binning method from the oneclass_prob paper by Que and Lin to predict the probability as a normal instance (i.e., not an outlier)
- static double predict_one_class_probability(const svm_model *model, double dec_value) {
- double prob_estimate = 0.0;
- int nr_marks = 10;
- if (dec_value < model->prob_density_marks[0])
- prob_estimate = 0.001;
- else if (dec_value > model->prob_density_marks[nr_marks - 1])
- prob_estimate = 0.999;
- else {
- for (int i = 1; i < nr_marks; i++)
- if (dec_value < model->prob_density_marks[i]) {
- prob_estimate = (double) i / nr_marks;
- break;
- }
- }
- return prob_estimate;
- }
- static int compare_double(const void *a, const void *b) {
- if (*(double *) a > *(double *) b)
- return 1;
- else if (*(double *) a < *(double *) b)
- return -1;
- return 0;
- }
- // Get parameters for one-class SVM probability estimates
- static int svm_one_class_probability(const svm_problem *prob, const svm_model *model, double *prob_density_marks) {
- double *dec_values = Malloc(double, prob->l);
- double *pred_results = Malloc(double, prob->l);
- int ret = 0;
- int nr_marks = 10;
- for (int i = 0; i < prob->l; i++)
- pred_results[i] = svm_predict_values(model, prob->x[i], &dec_values[i]);
- qsort(dec_values, prob->l, sizeof(double), compare_double);
- int neg_counter = 0;
- for (int i = 0; i < prob->l; i++)
- if (dec_values[i] >= 0) {
- neg_counter = i;
- break;
- }
- int pos_counter = prob->l - neg_counter;
- if (neg_counter < nr_marks / 2 || pos_counter < nr_marks / 2) {
- fprintf(stderr,
- "WARNING: number of positive or negative decision values <%d; too few to do a probability estimation.\n",
- nr_marks / 2);
- ret = -1;
- } else {
- // Binning by density
- double *tmp_marks = Malloc(double, nr_marks + 1);
- int mid = nr_marks / 2;
- for (int i = 0; i < mid; i++)
- tmp_marks[i] = dec_values[i * neg_counter / mid];
- tmp_marks[mid] = 0;
- for (int i = mid + 1; i < nr_marks + 1; i++)
- tmp_marks[i] = dec_values[neg_counter - 1 + (i - mid) * pos_counter / mid];
- for (int i = 0; i < nr_marks; i++)
- prob_density_marks[i] = (tmp_marks[i] + tmp_marks[i + 1]) / 2;
- free(tmp_marks);
- }
- free(dec_values);
- free(pred_results);
- return ret;
- }
- // Return parameter of a Laplace distribution
- static double svm_svr_probability(
- const svm_problem *prob, const svm_parameter *param) {
- int i;
- int nr_fold = 5;
- double *ymv = Malloc(double, prob->l);
- double mae = 0;
- svm_parameter newparam = *param;
- newparam.probability = 0;
- svm_cross_validation(prob, &newparam, nr_fold, ymv);
- for (i = 0; i < prob->l; i++) {
- ymv[i] = prob->y[i] - ymv[i];
- mae += fabs(ymv[i]);
- }
- mae /= prob->l;
- double std = sqrt(2 * mae * mae);
- int count = 0;
- mae = 0;
- for (i = 0; i < prob->l; i++)
- if (fabs(ymv[i]) > 5 * std)
- count = count + 1;
- else
- mae += fabs(ymv[i]);
- mae /= (prob->l - count);
- info("Prob. model for test data: target value = predicted value + z,\nz: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma= %g\n",
- mae);
- free(ymv);
- return mae;
- }
- // label: label name, start: begin of each class, count: #data of classes, perm: indices to the original data
- // perm, length l, must be allocated before calling this subroutine
- static void
- svm_group_classes(const svm_problem *prob, int *nr_class_ret, int **label_ret, int **start_ret, int **count_ret,
- int *perm) {
- int l = prob->l;
- int max_nr_class = 16;
- int nr_class = 0;
- int *label = Malloc(int, max_nr_class);
- int *count = Malloc(int, max_nr_class);
- int *data_label = Malloc(int, l);
- int i;
- for (i = 0; i < l; i++) {
- int this_label = (int) prob->y[i];
- int j;
- for (j = 0; j < nr_class; j++) {
- if (this_label == label[j]) {
- ++count[j];
- break;
- }
- }
- data_label[i] = j;
- if (j == nr_class) {
- if (nr_class == max_nr_class) {
- max_nr_class *= 2;
- label = (int *) realloc(label, max_nr_class * sizeof(int));
- count = (int *) realloc(count, max_nr_class * sizeof(int));
- }
- label[nr_class] = this_label;
- count[nr_class] = 1;
- ++nr_class;
- }
- }
- //
- // Labels are ordered by their first occurrence in the training set.
- // However, for two-class sets with -1/+1 labels and -1 appears first,
- // we swap labels to ensure that internally the binary SVM has positive data corresponding to the +1 instances.
- //
- if (nr_class == 2 && label[0] == -1 && label[1] == 1) {
- swap(label[0], label[1]);
- swap(count[0], count[1]);
- for (i = 0; i < l; i++) {
- if (data_label[i] == 0)
- data_label[i] = 1;
- else
- data_label[i] = 0;
- }
- }
- int *start = Malloc(int, nr_class);
- start[0] = 0;
- for (i = 1; i < nr_class; i++)
- start[i] = start[i - 1] + count[i - 1];
- for (i = 0; i < l; i++) {
- perm[start[data_label[i]]] = i;
- ++start[data_label[i]];
- }
- start[0] = 0;
- for (i = 1; i < nr_class; i++)
- start[i] = start[i - 1] + count[i - 1];
- *nr_class_ret = nr_class;
- *label_ret = label;
- *start_ret = start;
- *count_ret = count;
- free(data_label);
- }
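- // Worked example (illustrative): for labels y = {3,1,3,2,1} this returns
- //   label = {3,1,2}, count = {2,2,1}, start = {0,2,4}
- // and fills perm = {0,2,1,4,3}: indices of class 3 first, then class 1, then
- // class 2, in order of first appearance (the -1/+1 swap above does not apply).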
- //
- // Interface functions
- //
- svm_model *svm_train(const svm_problem *prob, const svm_parameter *param) {
- svm_model *model = Malloc(svm_model, 1);
- model->param = *param;
- model->free_sv = 0; // XXX
- if (param->svm_type == ONE_CLASS ||
- param->svm_type == EPSILON_SVR ||
- param->svm_type == NU_SVR) {
- // regression or one-class-svm
- model->nr_class = 2;
- model->label = NULL;
- model->nSV = NULL;
- model->probA = NULL;
- model->probB = NULL;
- model->prob_density_marks = NULL;
- model->sv_coef = Malloc(double *, 1);
- decision_function f = svm_train_one(prob, param, 0, 0);
- model->rho = Malloc(double, 1);
- model->rho[0] = f.rho;
- int nSV = 0;
- int i;
- for (i = 0; i < prob->l; i++)
- if (fabs(f.alpha[i]) > 0) ++nSV;
- model->l = nSV;
- model->SV = Malloc(svm_node *, nSV);
- model->sv_coef[0] = Malloc(double, nSV);
- model->sv_indices = Malloc(int, nSV);
- int j = 0;
- for (i = 0; i < prob->l; i++)
- if (fabs(f.alpha[i]) > 0) {
- model->SV[j] = prob->x[i];
- model->sv_coef[0][j] = f.alpha[i];
- model->sv_indices[j] = i + 1;
- ++j;
- }
- if (param->probability &&
- (param->svm_type == EPSILON_SVR ||
- param->svm_type == NU_SVR)) {
- model->probA = Malloc(double, 1);
- model->probA[0] = svm_svr_probability(prob, param);
- } else if (param->probability && param->svm_type == ONE_CLASS) {
- int nr_marks = 10;
- double *prob_density_marks = Malloc(double, nr_marks);
- if (svm_one_class_probability(prob, model, prob_density_marks) == 0)
- model->prob_density_marks = prob_density_marks;
- else
- free(prob_density_marks);
- }
- free(f.alpha);
- } else {
- // classification
- int l = prob->l;
- int nr_class;
- int *label = NULL;
- int *start = NULL;
- int *count = NULL;
- int *perm = Malloc(int, l);
- // group training data of the same class
- svm_group_classes(prob, &nr_class, &label, &start, &count, perm);
- if (nr_class == 1)
- info("WARNING: training data in only one class. See README for details.\n");
- svm_node **x = Malloc(svm_node *, l);
- int i;
- for (i = 0; i < l; i++)
- x[i] = prob->x[perm[i]];
- // calculate weighted C
- double *weighted_C = Malloc(double, nr_class);
- for (i = 0; i < nr_class; i++)
- weighted_C[i] = param->C;
- for (i = 0; i < param->nr_weight; i++) {
- int j;
- for (j = 0; j < nr_class; j++)
- if (param->weight_label[i] == label[j])
- break;
- if (j == nr_class)
- fprintf(stderr, "WARNING: class label %d specified in weight is not found\n", param->weight_label[i]);
- else
- weighted_C[j] *= param->weight[i];
- }
- // train k*(k-1)/2 models
- bool *nonzero = Malloc(bool, l);
- for (i = 0; i < l; i++)
- nonzero[i] = false;
- decision_function *f = Malloc(decision_function, nr_class * (nr_class - 1) / 2);
- double *probA = NULL, *probB = NULL;
- if (param->probability) {
- probA = Malloc(double, nr_class * (nr_class - 1) / 2);
- probB = Malloc(double, nr_class * (nr_class - 1) / 2);
- }
- int p = 0;
- for (i = 0; i < nr_class; i++)
- for (int j = i + 1; j < nr_class; j++) {
- svm_problem sub_prob;
- int si = start[i], sj = start[j];
- int ci = count[i], cj = count[j];
- sub_prob.l = ci + cj;
- sub_prob.x = Malloc(svm_node *, sub_prob.l);
- sub_prob.y = Malloc(double, sub_prob.l);
- int k;
- for (k = 0; k < ci; k++) {
- sub_prob.x[k] = x[si + k];
- sub_prob.y[k] = +1;
- }
- for (k = 0; k < cj; k++) {
- sub_prob.x[ci + k] = x[sj + k];
- sub_prob.y[ci + k] = -1;
- }
- if (param->probability)
- svm_binary_svc_probability(&sub_prob, param, weighted_C[i], weighted_C[j], probA[p], probB[p]);
- f[p] = svm_train_one(&sub_prob, param, weighted_C[i], weighted_C[j]);
- for (k = 0; k < ci; k++)
- if (!nonzero[si + k] && fabs(f[p].alpha[k]) > 0)
- nonzero[si + k] = true;
- for (k = 0; k < cj; k++)
- if (!nonzero[sj + k] && fabs(f[p].alpha[ci + k]) > 0)
- nonzero[sj + k] = true;
- free(sub_prob.x);
- free(sub_prob.y);
- ++p;
- }
- // build output
- model->nr_class = nr_class;
- model->label = Malloc(int, nr_class);
- for (i = 0; i < nr_class; i++)
- model->label[i] = label[i];
- model->rho = Malloc(double, nr_class * (nr_class - 1) / 2);
- for (i = 0; i < nr_class * (nr_class - 1) / 2; i++)
- model->rho[i] = f[i].rho;
- if (param->probability) {
- model->probA = Malloc(double, nr_class * (nr_class - 1) / 2);
- model->probB = Malloc(double, nr_class * (nr_class - 1) / 2);
- for (i = 0; i < nr_class * (nr_class - 1) / 2; i++) {
- model->probA[i] = probA[i];
- model->probB[i] = probB[i];
- }
- } else {
- model->probA = NULL;
- model->probB = NULL;
- }
- model->prob_density_marks = NULL; // for one-class SVM probabilistic outputs only
- int total_sv = 0;
- int *nz_count = Malloc(int, nr_class);
- model->nSV = Malloc(int, nr_class);
- for (i = 0; i < nr_class; i++) {
- int nSV = 0;
- for (int j = 0; j < count[i]; j++)
- if (nonzero[start[i] + j]) {
- ++nSV;
- ++total_sv;
- }
- model->nSV[i] = nSV;
- nz_count[i] = nSV;
- }
- info("Total nSV = %d\n", total_sv);
- model->l = total_sv;
- model->SV = Malloc(svm_node *, total_sv);
- model->sv_indices = Malloc(int, total_sv);
- p = 0;
- for (i = 0; i < l; i++)
- if (nonzero[i]) {
- model->SV[p] = x[i];
- model->sv_indices[p++] = perm[i] + 1;
- }
- int *nz_start = Malloc(int, nr_class);
- nz_start[0] = 0;
- for (i = 1; i < nr_class; i++)
- nz_start[i] = nz_start[i - 1] + nz_count[i - 1];
- model->sv_coef = Malloc(double *, nr_class - 1);
- for (i = 0; i < nr_class - 1; i++)
- model->sv_coef[i] = Malloc(double, total_sv);
- p = 0;
- for (i = 0; i < nr_class; i++)
- for (int j = i + 1; j < nr_class; j++) {
- // classifier (i,j): coefficients with
- // i are in sv_coef[j-1][nz_start[i]...],
- // j are in sv_coef[i][nz_start[j]...]
- int si = start[i];
- int sj = start[j];
- int ci = count[i];
- int cj = count[j];
- int q = nz_start[i];
- int k;
- for (k = 0; k < ci; k++)
- if (nonzero[si + k])
- model->sv_coef[j - 1][q++] = f[p].alpha[k];
- q = nz_start[j];
- for (k = 0; k < cj; k++)
- if (nonzero[sj + k])
- model->sv_coef[i][q++] = f[p].alpha[ci + k];
- ++p;
- }
- free(label);
- free(probA);
- free(probB);
- free(count);
- free(perm);
- free(start);
- free(x);
- free(weighted_C);
- free(nonzero);
- for (i = 0; i < nr_class * (nr_class - 1) / 2; i++)
- free(f[i].alpha);
- free(f);
- free(nz_count);
- free(nz_start);
- }
- return model;
- }
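- //
- // Illustrative usage sketch (not part of the library): builds a tiny two-class
- // problem with one-feature vectors and trains a C-SVC model with an RBF kernel.
- // All numeric values here are arbitrary example data.
- //
- //   struct svm_node x_space[] = { {1, 0.0}, {-1, 0.0},    // x[0] = (0.0)
- //                                 {1, 1.0}, {-1, 0.0} };  // x[1] = (1.0)
- //   struct svm_node *x[] = { &x_space[0], &x_space[2] };
- //   double y[] = { -1, +1 };
- //   struct svm_problem prob;
- //   prob.l = 2; prob.y = y; prob.x = x;
- //
- //   struct svm_parameter param;
- //   param.svm_type = C_SVC; param.kernel_type = RBF;
- //   param.degree = 3; param.gamma = 1.0; param.coef0 = 0;
- //   param.C = 1.0; param.nu = 0.5; param.p = 0.1;
- //   param.cache_size = 100; param.eps = 1e-3;
- //   param.nr_weight = 0; param.weight_label = NULL; param.weight = NULL;
- //   param.shrinking = 1; param.probability = 0;
- //
- //   const char *err = svm_check_parameter(&prob, &param);
- //   if (err == NULL) {
- //       struct svm_model *model = svm_train(&prob, &param);
- //       /* ... use the model ... */
- //       svm_free_and_destroy_model(&model);
- //   }
- //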
- // Stratified cross validation
- void svm_cross_validation(const svm_problem *prob, const svm_parameter *param, int nr_fold, double *target) {
- int i;
- int *fold_start;
- int l = prob->l;
- int *perm = Malloc(int, l);
- int nr_class;
- if (nr_fold > l) {
- fprintf(stderr,
- "WARNING: # folds (%d) > # data (%d). Will use # folds = # data instead (i.e., leave-one-out cross validation)\n",
- nr_fold, l);
- nr_fold = l;
- }
- fold_start = Malloc(int, nr_fold + 1);
- // Stratified CV may not give the leave-one-out rate:
- // splitting each class into l folds can leave some folds with zero elements.
- if ((param->svm_type == C_SVC ||
- param->svm_type == NU_SVC) && nr_fold < l) {
- int *start = NULL;
- int *label = NULL;
- int *count = NULL;
- svm_group_classes(prob, &nr_class, &label, &start, &count, perm);
- // random shuffle and then data grouped by fold using the array perm
- int *fold_count = Malloc(int, nr_fold);
- int c;
- int *index = Malloc(int, l);
- for (i = 0; i < l; i++)
- index[i] = perm[i];
- for (c = 0; c < nr_class; c++)
- for (i = 0; i < count[c]; i++) {
- int j = i + rand() % (count[c] - i);
- swap(index[start[c] + j], index[start[c] + i]);
- }
- for (i = 0; i < nr_fold; i++) {
- fold_count[i] = 0;
- for (c = 0; c < nr_class; c++)
- fold_count[i] += (i + 1) * count[c] / nr_fold - i * count[c] / nr_fold;
- }
- fold_start[0] = 0;
- for (i = 1; i <= nr_fold; i++)
- fold_start[i] = fold_start[i - 1] + fold_count[i - 1];
- for (c = 0; c < nr_class; c++)
- for (i = 0; i < nr_fold; i++) {
- int begin = start[c] + i * count[c] / nr_fold;
- int end = start[c] + (i + 1) * count[c] / nr_fold;
- for (int j = begin; j < end; j++) {
- perm[fold_start[i]] = index[j];
- fold_start[i]++;
- }
- }
- fold_start[0] = 0;
- for (i = 1; i <= nr_fold; i++)
- fold_start[i] = fold_start[i - 1] + fold_count[i - 1];
- free(start);
- free(label);
- free(count);
- free(index);
- free(fold_count);
- } else {
- for (i = 0; i < l; i++) perm[i] = i;
- for (i = 0; i < l; i++) {
- int j = i + rand() % (l - i);
- swap(perm[i], perm[j]);
- }
- for (i = 0; i <= nr_fold; i++)
- fold_start[i] = i * l / nr_fold;
- }
- for (i = 0; i < nr_fold; i++) {
- int begin = fold_start[i];
- int end = fold_start[i + 1];
- int j, k;
- struct svm_problem subprob;
- subprob.l = l - (end - begin);
- subprob.x = Malloc(struct svm_node*, subprob.l);
- subprob.y = Malloc(double, subprob.l);
- k = 0;
- for (j = 0; j < begin; j++) {
- subprob.x[k] = prob->x[perm[j]];
- subprob.y[k] = prob->y[perm[j]];
- ++k;
- }
- for (j = end; j < l; j++) {
- subprob.x[k] = prob->x[perm[j]];
- subprob.y[k] = prob->y[perm[j]];
- ++k;
- }
- struct svm_model *submodel = svm_train(&subprob, param);
- if (param->probability &&
- (param->svm_type == C_SVC || param->svm_type == NU_SVC)) {
- double *prob_estimates = Malloc(double, svm_get_nr_class(submodel));
- for (j = begin; j < end; j++)
- target[perm[j]] = svm_predict_probability(submodel, prob->x[perm[j]], prob_estimates);
- free(prob_estimates);
- } else
- for (j = begin; j < end; j++)
- target[perm[j]] = svm_predict(submodel, prob->x[perm[j]]);
- svm_free_and_destroy_model(&submodel);
- free(subprob.x);
- free(subprob.y);
- }
- free(fold_start);
- free(perm);
- }
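- //
- // Illustrative usage sketch (not part of the library), assuming `prob` and
- // `param` are set up as for svm_train: 5-fold stratified CV followed by a
- // simple accuracy count for classification.
- //
- //   double *target = (double *) malloc(prob.l * sizeof(double));
- //   svm_cross_validation(&prob, &param, 5, target);
- //   int correct = 0;
- //   for (int i = 0; i < prob.l; i++)
- //       if (target[i] == prob.y[i]) ++correct;
- //   printf("CV accuracy = %g%%\n", 100.0 * correct / prob.l);
- //   free(target);
- //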
- int svm_get_svm_type(const svm_model *model) {
- return model->param.svm_type;
- }
- int svm_get_nr_class(const svm_model *model) {
- return model->nr_class;
- }
- void svm_get_labels(const svm_model *model, int *label) {
- if (model->label != NULL)
- for (int i = 0; i < model->nr_class; i++)
- label[i] = model->label[i];
- }
- void svm_get_sv_indices(const svm_model *model, int *indices) {
- if (model->sv_indices != NULL)
- for (int i = 0; i < model->l; i++)
- indices[i] = model->sv_indices[i];
- }
- int svm_get_nr_sv(const svm_model *model) {
- return model->l;
- }
- double svm_get_svr_probability(const svm_model *model) {
- if ((model->param.svm_type == EPSILON_SVR || model->param.svm_type == NU_SVR) &&
- model->probA != NULL)
- return model->probA[0];
- else {
- fprintf(stderr, "Model doesn't contain information for SVR probability inference\n");
- return 0;
- }
- }
- double svm_predict_values(const svm_model *model, const svm_node *x, double *dec_values) {
- int i;
- if (model->param.svm_type == ONE_CLASS ||
- model->param.svm_type == EPSILON_SVR ||
- model->param.svm_type == NU_SVR) {
- double *sv_coef = model->sv_coef[0];
- double sum = 0;
- #ifdef _OPENMP
- #pragma omp parallel for private(i) reduction(+:sum) schedule(guided)
- #endif
- for (i = 0; i < model->l; i++)
- sum += sv_coef[i] * Kernel::k_function(x, model->SV[i], model->param);
- sum -= model->rho[0];
- *dec_values = sum;
- if (model->param.svm_type == ONE_CLASS)
- return (sum > 0) ? 1 : -1;
- else
- return sum;
- } else {
- int nr_class = model->nr_class;
- int l = model->l;
- double *kvalue = Malloc(double, l);
- #ifdef _OPENMP
- #pragma omp parallel for private(i) schedule(guided)
- #endif
- for (i = 0; i < l; i++)
- kvalue[i] = Kernel::k_function(x, model->SV[i], model->param);
- int *start = Malloc(int, nr_class);
- start[0] = 0;
- for (i = 1; i < nr_class; i++)
- start[i] = start[i - 1] + model->nSV[i - 1];
- int *vote = Malloc(int, nr_class);
- for (i = 0; i < nr_class; i++)
- vote[i] = 0;
- int p = 0;
- for (i = 0; i < nr_class; i++)
- for (int j = i + 1; j < nr_class; j++) {
- double sum = 0;
- int si = start[i];
- int sj = start[j];
- int ci = model->nSV[i];
- int cj = model->nSV[j];
- int k;
- double *coef1 = model->sv_coef[j - 1];
- double *coef2 = model->sv_coef[i];
- for (k = 0; k < ci; k++)
- sum += coef1[si + k] * kvalue[si + k];
- for (k = 0; k < cj; k++)
- sum += coef2[sj + k] * kvalue[sj + k];
- sum -= model->rho[p];
- dec_values[p] = sum;
- if (dec_values[p] > 0)
- ++vote[i];
- else
- ++vote[j];
- p++;
- }
- int vote_max_idx = 0;
- for (i = 1; i < nr_class; i++)
- if (vote[i] > vote[vote_max_idx])
- vote_max_idx = i;
- free(kvalue);
- free(start);
- free(vote);
- return model->label[vote_max_idx];
- }
- }
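- //
- // Worked example (illustrative) of the one-vs-one voting in svm_predict_values:
- // for nr_class = 3 with label = {A, B, C}, dec_values is filled in pair order
- // (A,B), (A,C), (B,C). If dec_values = {+0.4, -0.2, -0.7}, then
- //   (A,B): + -> vote A;  (A,C): - -> vote C;  (B,C): - -> vote C
- // so vote = {A:1, B:0, C:2} and the function returns label C.
- //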
- double svm_predict(const svm_model *model, const svm_node *x) {
- int nr_class = model->nr_class;
- double *dec_values;
- if (model->param.svm_type == ONE_CLASS ||
- model->param.svm_type == EPSILON_SVR ||
- model->param.svm_type == NU_SVR)
- dec_values = Malloc(double, 1);
- else
- dec_values = Malloc(double, nr_class * (nr_class - 1) / 2);
- double pred_result = svm_predict_values(model, x, dec_values);
- free(dec_values);
- return pred_result;
- }
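- //
- // Illustrative usage sketch (not part of the library), assuming `model` was
- // obtained from svm_train or svm_load_model: predicting one sparse instance
- // with features 1:0.5 and 3:-1.2 (indices are 1-based and the array is
- // terminated by index = -1).
- //
- //   struct svm_node x[] = { {1, 0.5}, {3, -1.2}, {-1, 0.0} };
- //   double predicted_label = svm_predict(model, x);
- //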
- double svm_predict_probability(
- const svm_model *model, const svm_node *x, double *prob_estimates) {
- if ((model->param.svm_type == C_SVC || model->param.svm_type == NU_SVC) &&
- model->probA != NULL && model->probB != NULL) {
- int i;
- int nr_class = model->nr_class;
- double *dec_values = Malloc(double, nr_class * (nr_class - 1) / 2);
- svm_predict_values(model, x, dec_values);
- double min_prob = 1e-7;
- double **pairwise_prob = Malloc(double *, nr_class);
- for (i = 0; i < nr_class; i++)
- pairwise_prob[i] = Malloc(double, nr_class);
- int k = 0;
- for (i = 0; i < nr_class; i++)
- for (int j = i + 1; j < nr_class; j++) {
- pairwise_prob[i][j] = min(
- max(sigmoid_predict(dec_values[k], model->probA[k], model->probB[k]), min_prob), 1 - min_prob);
- pairwise_prob[j][i] = 1 - pairwise_prob[i][j];
- k++;
- }
- if (nr_class == 2) {
- prob_estimates[0] = pairwise_prob[0][1];
- prob_estimates[1] = pairwise_prob[1][0];
- } else
- multiclass_probability(nr_class, pairwise_prob, prob_estimates);
- int prob_max_idx = 0;
- for (i = 1; i < nr_class; i++)
- if (prob_estimates[i] > prob_estimates[prob_max_idx])
- prob_max_idx = i;
- for (i = 0; i < nr_class; i++)
- free(pairwise_prob[i]);
- free(dec_values);
- free(pairwise_prob);
- return model->label[prob_max_idx];
- } else if (model->param.svm_type == ONE_CLASS && model->prob_density_marks != NULL) {
- double dec_value;
- double pred_result = svm_predict_values(model, x, &dec_value);
- prob_estimates[0] = predict_one_class_probability(model, dec_value);
- prob_estimates[1] = 1 - prob_estimates[0];
- return pred_result;
- } else
- return svm_predict(model, x);
- }
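- //
- // Illustrative usage sketch (not part of the library), assuming `model` was
- // trained with param.probability = 1 so that probA/probB (or prob_density_marks
- // for one-class SVM) are available.
- //
- //   int nr_class = svm_get_nr_class(model);
- //   double *prob_estimates = (double *) malloc(nr_class * sizeof(double));
- //   double label = svm_predict_probability(model, x, prob_estimates);
- //   // for classification, prob_estimates[i] is the estimated probability of model->label[i]
- //   free(prob_estimates);
- //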
- static const char *svm_type_table[] =
- {
- "c_svc", "nu_svc", "one_class", "epsilon_svr", "nu_svr", NULL
- };
- static const char *kernel_type_table[] =
- {
- "linear", "polynomial", "rbf", "sigmoid", "precomputed", NULL
- };
- int svm_save_model(const char *model_file_name, const svm_model *model) {
- FILE *fp = fopen(model_file_name, "w");
- if (fp == NULL) return -1;
- char *old_locale = setlocale(LC_ALL, NULL);
- if (old_locale) {
- old_locale = strdup(old_locale);
- }
- setlocale(LC_ALL, "C");
- const svm_parameter &param = model->param;
- fprintf(fp, "svm_type %s\n", svm_type_table[param.svm_type]);
- fprintf(fp, "kernel_type %s\n", kernel_type_table[param.kernel_type]);
- if (param.kernel_type == POLY)
- fprintf(fp, "degree %d\n", param.degree);
- if (param.kernel_type == POLY || param.kernel_type == RBF || param.kernel_type == SIGMOID)
- fprintf(fp, "gamma %.17g\n", param.gamma);
- if (param.kernel_type == POLY || param.kernel_type == SIGMOID)
- fprintf(fp, "coef0 %.17g\n", param.coef0);
- int nr_class = model->nr_class;
- int l = model->l;
- fprintf(fp, "nr_class %d\n", nr_class);
- fprintf(fp, "total_sv %d\n", l);
- {
- fprintf(fp, "rho");
- for (int i = 0; i < nr_class * (nr_class - 1) / 2; i++)
- fprintf(fp, " %.17g", model->rho[i]);
- fprintf(fp, "\n");
- }
- if (model->label) {
- fprintf(fp, "label");
- for (int i = 0; i < nr_class; i++)
- fprintf(fp, " %d", model->label[i]);
- fprintf(fp, "\n");
- }
- if (model->probA) // regression has probA only
- {
- fprintf(fp, "probA");
- for (int i = 0; i < nr_class * (nr_class - 1) / 2; i++)
- fprintf(fp, " %.17g", model->probA[i]);
- fprintf(fp, "\n");
- }
- if (model->probB) {
- fprintf(fp, "probB");
- for (int i = 0; i < nr_class * (nr_class - 1) / 2; i++)
- fprintf(fp, " %.17g", model->probB[i]);
- fprintf(fp, "\n");
- }
- if (model->prob_density_marks) {
- fprintf(fp, "prob_density_marks");
- int nr_marks = 10;
- for (int i = 0; i < nr_marks; i++)
- fprintf(fp, " %.17g", model->prob_density_marks[i]);
- fprintf(fp, "\n");
- }
- if (model->nSV) {
- fprintf(fp, "nr_sv");
- for (int i = 0; i < nr_class; i++)
- fprintf(fp, " %d", model->nSV[i]);
- fprintf(fp, "\n");
- }
- fprintf(fp, "SV\n");
- const double *const *sv_coef = model->sv_coef;
- const svm_node *const *SV = model->SV;
- for (int i = 0; i < l; i++) {
- for (int j = 0; j < nr_class - 1; j++)
- fprintf(fp, "%.17g ", sv_coef[j][i]);
- const svm_node *p = SV[i];
- if (param.kernel_type == PRECOMPUTED)
- fprintf(fp, "0:%d ", (int) (p->value));
- else
- while (p->index != -1) {
- fprintf(fp, "%d:%.8g ", p->index, p->value);
- p++;
- }
- fprintf(fp, "\n");
- }
- setlocale(LC_ALL, old_locale);
- free(old_locale);
- if (ferror(fp) != 0 || fclose(fp) != 0) return -1;
- else return 0;
- }
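- //
- // Illustrative sketch (not part of the library) of the header written above for
- // a two-class RBF C-SVC model; the numeric values are arbitrary examples.
- //
- //   svm_type c_svc
- //   kernel_type rbf
- //   gamma 0.25
- //   nr_class 2
- //   total_sv 3
- //   rho -0.1234
- //   label 1 -1
- //   nr_sv 2 1
- //   SV
- //   1 1:0.5 2:-1.2
- //   ...
- //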
- static char *line = NULL;
- static int max_line_len;
- static char *readline(FILE *input) {
- int len;
- if (fgets(line, max_line_len, input) == NULL)
- return NULL;
- while (strrchr(line, '\n') == NULL) {
- max_line_len *= 2;
- line = (char *) realloc(line, max_line_len);
- len = (int) strlen(line);
- if (fgets(line + len, max_line_len - len, input) == NULL)
- break;
- }
- return line;
- }
- //
- // FSCANF wraps fscanf and makes the enclosing function return false on a read failure.
- // Its do-while(0) block avoids dangling-else ambiguity when the macro is used as
- //     if (...)
- //         FSCANF();
- //     else ...
- //
- #define FSCANF(_stream, _format, _var) do{ if (fscanf(_stream, _format, _var) != 1) return false; }while(0)
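- // For example (illustrative, with hypothetical names header_ok, n, report_error):
- // without the do-while(0) wrapper, a macro expanding to a bare
- //   if (fscanf(...) != 1) return false;
- // would capture the following else:
- //
- //   if (header_ok)
- //       FSCANF(fp, "%d", &n);
- //   else
- //       report_error();   // would bind to the if inside the macro expansion
- //
- // With do{...}while(0) the macro behaves like a single statement and the else binds as written.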
- bool read_model_header(FILE *fp, svm_model *model) {
- svm_parameter &param = model->param;
- // parameters for training only won't be assigned, but arrays are assigned as NULL for safety
- param.nr_weight = 0;
- param.weight_label = NULL;
- param.weight = NULL;
- char cmd[81];
- while (1) {
- FSCANF(fp, "%80s", cmd);
- if (strcmp(cmd, "svm_type") == 0) {
- FSCANF(fp, "%80s", cmd);
- int i;
- for (i = 0; svm_type_table[i]; i++) {
- if (strcmp(svm_type_table[i], cmd) == 0) {
- param.svm_type = i;
- break;
- }
- }
- if (svm_type_table[i] == NULL) {
- fprintf(stderr, "unknown svm type.\n");
- return false;
- }
- } else if (strcmp(cmd, "kernel_type") == 0) {
- FSCANF(fp, "%80s", cmd);
- int i;
- for (i = 0; kernel_type_table[i]; i++) {
- if (strcmp(kernel_type_table[i], cmd) == 0) {
- param.kernel_type = i;
- break;
- }
- }
- if (kernel_type_table[i] == NULL) {
- fprintf(stderr, "unknown kernel function.\n");
- return false;
- }
- } else if (strcmp(cmd, "degree") == 0)
- FSCANF(fp, "%d", ¶m.degree);
- else if (strcmp(cmd, "gamma") == 0)
- FSCANF(fp, "%lf", ¶m.gamma);
- else if (strcmp(cmd, "coef0") == 0)
- FSCANF(fp, "%lf", ¶m.coef0);
- else if (strcmp(cmd, "nr_class") == 0)
- FSCANF(fp, "%d", &model->nr_class);
- else if (strcmp(cmd, "total_sv") == 0)
- FSCANF(fp, "%d", &model->l);
- else if (strcmp(cmd, "rho") == 0) {
- int n = model->nr_class * (model->nr_class - 1) / 2;
- model->rho = Malloc(double, n);
- for (int i = 0; i < n; i++)
- FSCANF(fp, "%lf", &model->rho[i]);
- } else if (strcmp(cmd, "label") == 0) {
- int n = model->nr_class;
- model->label = Malloc(int, n);
- for (int i = 0; i < n; i++)
- FSCANF(fp, "%d", &model->label[i]);
- } else if (strcmp(cmd, "probA") == 0) {
- int n = model->nr_class * (model->nr_class - 1) / 2;
- model->probA = Malloc(double, n);
- for (int i = 0; i < n; i++)
- FSCANF(fp, "%lf", &model->probA[i]);
- } else if (strcmp(cmd, "probB") == 0) {
- int n = model->nr_class * (model->nr_class - 1) / 2;
- model->probB = Malloc(double, n);
- for (int i = 0; i < n; i++)
- FSCANF(fp, "%lf", &model->probB[i]);
- } else if (strcmp(cmd, "prob_density_marks") == 0) {
- int n = 10; // nr_marks
- model->prob_density_marks = Malloc(double, n);
- for (int i = 0; i < n; i++)
- FSCANF(fp, "%lf", &model->prob_density_marks[i]);
- } else if (strcmp(cmd, "nr_sv") == 0) {
- int n = model->nr_class;
- model->nSV = Malloc(int, n);
- for (int i = 0; i < n; i++)
- FSCANF(fp, "%d", &model->nSV[i]);
- } else if (strcmp(cmd, "SV") == 0) {
- while (1) {
- int c = getc(fp);
- if (c == EOF || c == '\n') break;
- }
- break;
- } else {
- fprintf(stderr, "unknown text in model file: [%s]\n", cmd);
- return false;
- }
- }
- return true;
- }
- svm_model *svm_load_model(const char *model_file_name) {
- FILE *fp = fopen(model_file_name, "rb");
- if (fp == NULL) return NULL;
- char *old_locale = setlocale(LC_ALL, NULL);
- if (old_locale) {
- old_locale = strdup(old_locale);
- }
- setlocale(LC_ALL, "C");
- // read parameters
- svm_model *model = Malloc(svm_model, 1);
- model->rho = NULL;
- model->probA = NULL;
- model->probB = NULL;
- model->prob_density_marks = NULL;
- model->sv_indices = NULL;
- model->label = NULL;
- model->nSV = NULL;
- // read header
- if (!read_model_header(fp, model)) {
- fprintf(stderr, "ERROR: fscanf failed to read model\n");
- setlocale(LC_ALL, old_locale);
- free(old_locale);
- free(model->rho);
- free(model->label);
- free(model->nSV);
- free(model);
- return NULL;
- }
- // read sv_coef and SV
- int elements = 0;
- long pos = ftell(fp);
- max_line_len = 1024;
- line = Malloc(char, max_line_len);
- char *p, *endptr, *idx, *val;
- while (readline(fp) != NULL) {
- p = strtok(line, ":");
- while (1) {
- p = strtok(NULL, ":");
- if (p == NULL)
- break;
- ++elements;
- }
- }
- elements += model->l;
- fseek(fp, pos, SEEK_SET);
- int m = model->nr_class - 1;
- int l = model->l;
- model->sv_coef = Malloc(double *, m);
- int i;
- for (i = 0; i < m; i++)
- model->sv_coef[i] = Malloc(double, l);
- model->SV = Malloc(svm_node*, l);
- svm_node *x_space = NULL;
- if (l > 0) x_space = Malloc(svm_node, elements);
- int j = 0;
- for (i = 0; i < l; i++) {
- readline(fp);
- model->SV[i] = &x_space[j];
- p = strtok(line, " \t");
- model->sv_coef[0][i] = strtod(p, &endptr);
- for (int k = 1; k < m; k++) {
- p = strtok(NULL, " \t");
- model->sv_coef[k][i] = strtod(p, &endptr);
- }
- while (1) {
- idx = strtok(NULL, ":");
- val = strtok(NULL, " \t");
- if (val == NULL)
- break;
- x_space[j].index = (int) strtol(idx, &endptr, 10);
- x_space[j].value = strtod(val, &endptr);
- ++j;
- }
- x_space[j++].index = -1;
- }
- free(line);
- setlocale(LC_ALL, old_locale);
- free(old_locale);
- if (ferror(fp) != 0 || fclose(fp) != 0)
- return NULL;
- model->free_sv = 1; // XXX
- return model;
- }
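- //
- // Illustrative usage sketch (not part of the library); the file name is an
- // arbitrary example.
- //
- //   struct svm_model *model = svm_load_model("heart_scale.model");
- //   if (model == NULL) {
- //       fprintf(stderr, "can't open model file\n");
- //   } else {
- //       /* ... svm_predict(model, x), etc. ... */
- //       svm_free_and_destroy_model(&model);
- //   }
- //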
- void svm_free_model_content(svm_model *model_ptr) {
- if (model_ptr->free_sv && model_ptr->l > 0 && model_ptr->SV != NULL)
- free((void *) (model_ptr->SV[0]));
- if (model_ptr->sv_coef) {
- for (int i = 0; i < model_ptr->nr_class - 1; i++)
- free(model_ptr->sv_coef[i]);
- }
- free(model_ptr->SV);
- model_ptr->SV = NULL;
- free(model_ptr->sv_coef);
- model_ptr->sv_coef = NULL;
- free(model_ptr->rho);
- model_ptr->rho = NULL;
- free(model_ptr->label);
- model_ptr->label = NULL;
- free(model_ptr->probA);
- model_ptr->probA = NULL;
- free(model_ptr->probB);
- model_ptr->probB = NULL;
- free(model_ptr->prob_density_marks);
- model_ptr->prob_density_marks = NULL;
- free(model_ptr->sv_indices);
- model_ptr->sv_indices = NULL;
- free(model_ptr->nSV);
- model_ptr->nSV = NULL;
- }
- void svm_free_and_destroy_model(svm_model **model_ptr_ptr) {
- if (model_ptr_ptr != NULL && *model_ptr_ptr != NULL) {
- svm_free_model_content(*model_ptr_ptr);
- free(*model_ptr_ptr);
- *model_ptr_ptr = NULL;
- }
- }
- void svm_destroy_param(svm_parameter *param) {
- free(param->weight_label);
- free(param->weight);
- }
- const char *svm_check_parameter(const svm_problem *prob, const svm_parameter *param) {
- // svm_type
- int svm_type = param->svm_type;
- if (svm_type != C_SVC &&
- svm_type != NU_SVC &&
- svm_type != ONE_CLASS &&
- svm_type != EPSILON_SVR &&
- svm_type != NU_SVR)
- return "unknown svm type";
- // kernel_type, degree
- int kernel_type = param->kernel_type;
- if (kernel_type != LINEAR &&
- kernel_type != POLY &&
- kernel_type != RBF &&
- kernel_type != SIGMOID &&
- kernel_type != PRECOMPUTED)
- return "unknown kernel type";
- if ((kernel_type == POLY || kernel_type == RBF || kernel_type == SIGMOID) &&
- param->gamma < 0)
- return "gamma < 0";
- if (kernel_type == POLY && param->degree < 0)
- return "degree of polynomial kernel < 0";
- // cache_size,eps,C,nu,p,shrinking
- if (param->cache_size <= 0)
- return "cache_size <= 0";
- if (param->eps <= 0)
- return "eps <= 0";
- if (svm_type == C_SVC ||
- svm_type == EPSILON_SVR ||
- svm_type == NU_SVR)
- if (param->C <= 0)
- return "C <= 0";
- if (svm_type == NU_SVC ||
- svm_type == ONE_CLASS ||
- svm_type == NU_SVR)
- if (param->nu <= 0 || param->nu > 1)
- return "nu <= 0 or nu > 1";
- if (svm_type == EPSILON_SVR)
- if (param->p < 0)
- return "p < 0";
- if (param->shrinking != 0 &&
- param->shrinking != 1)
- return "shrinking != 0 and shrinking != 1";
- if (param->probability != 0 &&
- param->probability != 1)
- return "probability != 0 and probability != 1";
- // check whether nu-svc is feasible
- if (svm_type == NU_SVC) {
- int l = prob->l;
- int max_nr_class = 16;
- int nr_class = 0;
- int *label = Malloc(int, max_nr_class);
- int *count = Malloc(int, max_nr_class);
- int i;
- for (i = 0; i < l; i++) {
- int this_label = (int) prob->y[i];
- int j;
- for (j = 0; j < nr_class; j++)
- if (this_label == label[j]) {
- ++count[j];
- break;
- }
- if (j == nr_class) {
- if (nr_class == max_nr_class) {
- max_nr_class *= 2;
- label = (int *) realloc(label, max_nr_class * sizeof(int));
- count = (int *) realloc(count, max_nr_class * sizeof(int));
- }
- label[nr_class] = this_label;
- count[nr_class] = 1;
- ++nr_class;
- }
- }
- for (i = 0; i < nr_class; i++) {
- int n1 = count[i];
- for (int j = i + 1; j < nr_class; j++) {
- int n2 = count[j];
- if (param->nu * (n1 + n2) / 2 > min(n1, n2)) {
- free(label);
- free(count);
- return "specified nu is infeasible";
- }
- }
- }
- free(label);
- free(count);
- }
- return NULL;
- }
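- //
- // Worked example (illustrative) of the nu-SVC feasibility test above:
- // with class sizes n1 = 90 and n2 = 10, the pair is feasible only when
- //   nu * (n1 + n2) / 2 <= min(n1, n2),  i.e.  nu <= 2 * 10 / 100 = 0.2,
- // so e.g. nu = 0.5 makes svm_check_parameter return "specified nu is infeasible".
- //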
- int svm_check_probability_model(const svm_model *model) {
- return
- ((model->param.svm_type == C_SVC || model->param.svm_type == NU_SVC) &&
- model->probA != NULL && model->probB != NULL) ||
- (model->param.svm_type == ONE_CLASS && model->prob_density_marks != NULL) ||
- ((model->param.svm_type == EPSILON_SVR || model->param.svm_type == NU_SVR) &&
- model->probA != NULL);
- }
- void svm_set_print_string_function(void (*print_func)(const char *)) {
- if (print_func == NULL)
- svm_print_string = &print_string_stdout;
- else
- svm_print_string = print_func;
- }
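- //
- // Illustrative usage sketch (not part of the library): silence all training
- // output by installing a no-op print function, or restore stdout by passing NULL.
- //
- //   static void print_null(const char *s) {}
- //   ...
- //   svm_set_print_string_function(&print_null);   // quiet mode
- //   svm_set_print_string_function(NULL);          // back to stdout
- //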