/************************************************************************* ALGLIB 3.15.0 (source code generated 2019-02-20) Copyright (c) Sergey Bochkanov (ALGLIB project). >>> SOURCE LICENSE >>> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation (www.fsf.org); either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. A copy of the GNU General Public License is available at http://www.fsf.org/licensing/licenses >>> END OF LICENSE >>> *************************************************************************/ #pragma warning disable 162 #pragma warning disable 164 #pragma warning disable 219 using System; public partial class alglib { /************************************************************************* Buffer object which is used to perform evaluation requests in the multithreaded mode (multiple threads working with same IDW object). This object should be created with idwcreatecalcbuffer(). *************************************************************************/ public class idwcalcbuffer : alglibobject { // // Public declarations // public idwcalcbuffer() { _innerobj = new idw.idwcalcbuffer(); } public override alglib.alglibobject make_copy() { return new idwcalcbuffer((idw.idwcalcbuffer)_innerobj.make_copy()); } // // Although some of declarations below are public, you should not use them // They are intended for internal use only // private idw.idwcalcbuffer _innerobj; public idw.idwcalcbuffer innerobj { get { return _innerobj; } } public idwcalcbuffer(idw.idwcalcbuffer obj) { _innerobj = obj; } } /************************************************************************* IDW (Inverse Distance Weighting) model object. *************************************************************************/ public class idwmodel : alglibobject { // // Public declarations // public idwmodel() { _innerobj = new idw.idwmodel(); } public override alglib.alglibobject make_copy() { return new idwmodel((idw.idwmodel)_innerobj.make_copy()); } // // Although some of declarations below are public, you should not use them // They are intended for internal use only // private idw.idwmodel _innerobj; public idw.idwmodel innerobj { get { return _innerobj; } } public idwmodel(idw.idwmodel obj) { _innerobj = obj; } } /************************************************************************* Builder object used to generate IDW (Inverse Distance Weighting) model. 
*************************************************************************/ public class idwbuilder : alglibobject { // // Public declarations // public idwbuilder() { _innerobj = new idw.idwbuilder(); } public override alglib.alglibobject make_copy() { return new idwbuilder((idw.idwbuilder)_innerobj.make_copy()); } // // Although some of declarations below are public, you should not use them // They are intended for internal use only // private idw.idwbuilder _innerobj; public idw.idwbuilder innerobj { get { return _innerobj; } } public idwbuilder(idw.idwbuilder obj) { _innerobj = obj; } } /************************************************************************* IDW fitting report: rmserror RMS error avgerror average error maxerror maximum error r2 coefficient of determination, R-squared, 1-RSS/TSS *************************************************************************/ public class idwreport : alglibobject { // // Public declarations // public double rmserror { get { return _innerobj.rmserror; } set { _innerobj.rmserror = value; } } public double avgerror { get { return _innerobj.avgerror; } set { _innerobj.avgerror = value; } } public double maxerror { get { return _innerobj.maxerror; } set { _innerobj.maxerror = value; } } public double r2 { get { return _innerobj.r2; } set { _innerobj.r2 = value; } } public idwreport() { _innerobj = new idw.idwreport(); } public override alglib.alglibobject make_copy() { return new idwreport((idw.idwreport)_innerobj.make_copy()); } // // Although some of declarations below are public, you should not use them // They are intended for internal use only // private idw.idwreport _innerobj; public idw.idwreport innerobj { get { return _innerobj; } } public idwreport(idw.idwreport obj) { _innerobj = obj; } } /************************************************************************* This function serializes data structure to string. Important properties of s_out: * it contains alphanumeric characters, dots, underscores, minus signs * these symbols are grouped into words, which are separated by spaces and Windows-style (CR+LF) newlines * although serializer uses spaces and CR+LF as separators, you can replace any separator character by arbitrary combination of spaces, tabs, Windows or Unix newlines. It allows flexible reformatting of the string in case you want to include it into text or XML file. But you should not insert separators into the middle of the "words" nor you should change case of letters. * s_out can be freely moved between 32-bit and 64-bit systems, little and big endian machines, and so on. You can serialize structure on 32-bit machine and unserialize it on 64-bit one (or vice versa), or serialize it on SPARC and unserialize on x86. You can also serialize it in C# version of ALGLIB and unserialize in C++ one, and vice versa. *************************************************************************/ public static void idwserialize(idwmodel obj, out string s_out) { alglib.serializer s = new alglib.serializer(); s.alloc_start(); idw.idwalloc(s, obj.innerobj, null); s.sstart_str(); idw.idwserialize(s, obj.innerobj, null); s.stop(); s_out = s.get_string(); } /************************************************************************* This function unserializes data structure from string. 
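EXAMPLE (illustrative sketch; the model variable is assumed to be an already constructed idwmodel with NX=2, NY=1): a typical round-trip through the string serializer looks like

    string s;
    alglib.idwserialize(model, out s);               // model -> portable string
    alglib.idwmodel restored;
    alglib.idwunserialize(s, out restored);          // string -> model
    double v = alglib.idwcalc2(restored, 0.5, 0.5);  // restored model is fully usable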
*************************************************************************/ public static void idwunserialize(string s_in, out idwmodel obj) { alglib.serializer s = new alglib.serializer(); obj = new idwmodel(); s.ustart_str(s_in); idw.idwunserialize(s, obj.innerobj, null); s.stop(); } /************************************************************************* This function serializes data structure to stream. Data stream generated by this function is same as string representation generated by string version of serializer - alphanumeric characters, dots, underscores, minus signs, which are grouped into words separated by spaces and CR+LF. We recommend you to read comments on string version of serializer to find out more about serialization of AlGLIB objects. *************************************************************************/ public static void idwserialize(idwmodel obj, System.IO.Stream stream_out) { alglib.serializer s = new alglib.serializer(); s.alloc_start(); idw.idwalloc(s, obj.innerobj, null); s.sstart_stream(stream_out); idw.idwserialize(s, obj.innerobj, null); s.stop(); } /************************************************************************* This function unserializes data structure from stream. *************************************************************************/ public static void idwunserialize(System.IO.Stream stream_in, out idwmodel obj) { alglib.serializer s = new alglib.serializer(); obj = new idwmodel(); s.ustart_stream(stream_in); idw.idwunserialize(s, obj.innerobj, null); s.stop(); } /************************************************************************* This function creates buffer structure which can be used to perform parallel IDW model evaluations (with one IDW model instance being used from multiple threads, as long as different threads use different instances of buffer). This buffer object can be used with idwtscalcbuf() function (here "ts" stands for "thread-safe", "buf" is a suffix which denotes function which reuses previously allocated output space). How to use it: * create IDW model structure or load it from file * call idwcreatecalcbuffer(), once per thread working with IDW model (you should call this function only AFTER model initialization, see below for more information) * call idwtscalcbuf() from different threads, with each thread working with its own copy of buffer object. INPUT PARAMETERS S - IDW model OUTPUT PARAMETERS Buf - external buffer. IMPORTANT: buffer object should be used only with IDW model object which was used to initialize buffer. Any attempt to use buffer with different object is dangerous - you may get memory violation error because sizes of internal arrays do not fit to dimensions of the IDW structure. IMPORTANT: you should call this function only for model which was built with model builder (or unserialized from file). Sizes of some internal structures are determined only after model is built, so buffer object created before model construction stage will be useless (and any attempt to use it will result in exception). 
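EXAMPLE (illustrative sketch; the model variable is assumed to be an already built 2-dimensional scalar idwmodel): each worker thread owns one private buffer, while the model itself is shared:

    alglib.idwcalcbuffer buf;
    alglib.idwcreatecalcbuffer(model, out buf);                       // call AFTER the model is built
    double[] y = new double[0];
    alglib.idwtscalcbuf(model, buf, new double[]{0.5, 0.5}, ref y);   // thread-safe evaluation, result in y[0]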
-- ALGLIB -- Copyright 22.10.2018 by Sergey Bochkanov *************************************************************************/
public static void idwcreatecalcbuffer(idwmodel s, out idwcalcbuffer buf) { buf = new idwcalcbuffer(); idw.idwcreatecalcbuffer(s.innerobj, buf.innerobj, null); }
public static void idwcreatecalcbuffer(idwmodel s, out idwcalcbuffer buf, alglib.xparams _params) { buf = new idwcalcbuffer(); idw.idwcreatecalcbuffer(s.innerobj, buf.innerobj, _params); }
/************************************************************************* This subroutine creates a builder object used to generate an IDW model from an irregularly sampled (scattered) dataset. Multidimensional scalar/vector-valued functions are supported. The builder object is used to fit a model to data as follows: * builder object is created with idwbuildercreate() function * dataset is added with idwbuildersetpoints() function * one of the modern IDW algorithms is chosen with either: * idwbuildersetalgomstab() - Multilayer STABilized algorithm (interpolation) Alternatively, one of the textbook algorithms can be chosen (not recommended): * idwbuildersetalgotextbookshepard() - textbook Shepard algorithm * idwbuildersetalgotextbookmodshepard() - textbook modified Shepard algorithm * finally, model construction is performed with idwfit() function. ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: NX - dimensionality of the argument, NX>=1 NY - dimensionality of the function being modeled, NY>=1; NY=1 corresponds to a classic scalar function, NY>=2 corresponds to a vector-valued function. OUTPUT PARAMETERS: State- builder object -- ALGLIB PROJECT -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/
public static void idwbuildercreate(int nx, int ny, out idwbuilder state) { state = new idwbuilder(); idw.idwbuildercreate(nx, ny, state.innerobj, null); }
public static void idwbuildercreate(int nx, int ny, out idwbuilder state, alglib.xparams _params) { state = new idwbuilder(); idw.idwbuildercreate(nx, ny, state.innerobj, _params); }
/************************************************************************* This function changes the number of layers used by the IDW-MSTAB algorithm. The more layers you have, the finer the details that can be reproduced with the IDW model. The fewer layers you have, the less memory and CPU time are consumed by the model. Memory consumption grows linearly with the layer count, running time grows sub-linearly. The default number of layers is 16, which allows you to reproduce details at distances down to SRad/65536. You will rarely need to change it. INPUT PARAMETERS: State - builder object NLayers - NLayers>=1, the number of layers used by the model.
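EXAMPLE (illustrative sketch; the NX/NY/NLayers values are arbitrary): reducing the layer count trades fine-scale detail for lower memory and CPU cost:

    alglib.idwbuilder builder;
    alglib.idwbuildercreate(2, 1, out builder);   // NX=2, NY=1
    alglib.idwbuildersetnlayers(builder, 8);      // 8 layers instead of the default 16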
-- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwbuildersetnlayers(idwbuilder state, int nlayers) { idw.idwbuildersetnlayers(state.innerobj, nlayers, null); } public static void idwbuildersetnlayers(idwbuilder state, int nlayers, alglib.xparams _params) { idw.idwbuildersetnlayers(state.innerobj, nlayers, _params); } /************************************************************************* This function adds dataset to the builder object. This function overrides results of the previous calls, i.e. multiple calls of this function will result in only the last set being added. INPUT PARAMETERS: State - builder object XY - points, array[N,NX+NY]. One row corresponds to one point in the dataset. First NX elements are coordinates, next NY elements are function values. Array may be larger than specified, in this case only leading [N,NX+NY] elements will be used. N - number of points in the dataset, N>=0. -- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwbuildersetpoints(idwbuilder state, double[,] xy, int n) { idw.idwbuildersetpoints(state.innerobj, xy, n, null); } public static void idwbuildersetpoints(idwbuilder state, double[,] xy, int n, alglib.xparams _params) { idw.idwbuildersetpoints(state.innerobj, xy, n, _params); } public static void idwbuildersetpoints(idwbuilder state, double[,] xy) { int n; n = ap.rows(xy); idw.idwbuildersetpoints(state.innerobj, xy, n, null); return; } public static void idwbuildersetpoints(idwbuilder state, double[,] xy, alglib.xparams _params) { int n; n = ap.rows(xy); idw.idwbuildersetpoints(state.innerobj, xy, n, _params); return; } /************************************************************************* This function sets IDW model construction algorithm to the Multilayer Stabilized IDW method (IDW-MSTAB), a latest incarnation of the inverse distance weighting interpolation which fixes shortcomings of the original and modified Shepard's variants. The distinctive features of IDW-MSTAB are: 1) exact interpolation is pursued (as opposed to fitting and noise suppression) 2) improved robustness when compared with that of other algorithms: * MSTAB shows almost no strange fitting artifacts like ripples and sharp spikes (unlike N-dimensional splines and HRBFs) * MSTAB does not return function values far from the interval spanned by the dataset; say, if all your points have |f|<=1, you can be sure that model value won't deviate too much from [-1,+1] 3) good model construction time competing with that of HRBFs and bicubic splines 4) ability to work with any number of dimensions, starting from NX=1 The drawbacks of IDW-MSTAB (and all IDW algorithms in general) are: 1) dependence of the model evaluation time on the search radius 2) bad extrapolation properties, models built by this method are usually conservative in their predictions Thus, IDW-MSTAB is a good "default" option if you want to perform scattered multidimensional interpolation. Although it has its drawbacks, it is easy to use and robust, which makes it a good first step. INPUT PARAMETERS: State - builder object SRad - initial search radius, SRad>0 is required. A model value is obtained by "smart" averaging of the dataset points within search radius. NOTE 1: IDW interpolation can correctly handle ANY dataset, including datasets with non-distinct points. 
In case non-distinct points are found, an average value for this point will be calculated. NOTE 2: the memory requirements for model storage are O(NPoints*NLayers). The model construction needs twice as much memory as model storage. NOTE 3: by default 16 IDW layers are built, which is enough for most cases. You can change this parameter with the idwbuildersetnlayers() method. Larger values may be necessary if you need to reproduce extra-fine details at distances smaller than SRad/65536. A smaller value may be necessary if you have to save memory and computing time, and are ready to sacrifice some model quality. ALGORITHM DESCRIPTION ALGLIB implementation of IDW is somewhat similar to the modified Shepard's method (one with search radius R) but overcomes several of its drawbacks, namely: 1) a tendency to show stepwise behavior for uniform datasets 2) a tendency to show terrible interpolation properties for highly nonuniform datasets which often arise in geospatial tasks (function values are densely sampled across multiple separated "tracks") The IDW-MSTAB method performs several passes over the dataset and builds a sequence of progressively refined IDW models (layers), which starts from one with the largest search radius SRad and continues to smaller search radii until the required number of layers is built. Highest layers reproduce global behavior of the target function at larger distances whilst lower layers reproduce fine details at smaller distances. Each layer is an IDW model built with the following modifications: * weights go to zero when the distance approaches the current search radius * an additional regularizing term is added to the distance: w=1/(d^2+lambda) * an additional fictional term with unit weight and zero function value is added in order to promote continuity properties at the isolated and boundary points By default, 16 layers are built, which is enough for most cases. You can change this parameter with the idwbuildersetnlayers() method. -- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/
public static void idwbuildersetalgomstab(idwbuilder state, double srad) { idw.idwbuildersetalgomstab(state.innerobj, srad, null); }
public static void idwbuildersetalgomstab(idwbuilder state, double srad, alglib.xparams _params) { idw.idwbuildersetalgomstab(state.innerobj, srad, _params); }
/************************************************************************* This function sets the IDW model construction algorithm to the textbook Shepard's algorithm with custom (user-specified) power parameter. IMPORTANT: we do NOT recommend using textbook IDW algorithms because they have terrible interpolation properties. Use MSTAB in all cases. INPUT PARAMETERS: State - builder object P - power parameter, P>0; good value to start with is 2.0 NOTE 1: IDW interpolation can correctly handle ANY dataset, including datasets with non-distinct points. In case non-distinct points are found, an average value for this point will be calculated.
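EXAMPLE (illustrative sketch, shown for completeness only since MSTAB is the recommended algorithm): selecting the textbook Shepard algorithm with power parameter P=2:

    alglib.idwbuilder builder;
    alglib.idwbuildercreate(2, 1, out builder);              // NX=2, NY=1
    alglib.idwbuildersetalgotextbookshepard(builder, 2.0);   // classic 1/d^2 weighting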
-- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwbuildersetalgotextbookshepard(idwbuilder state, double p) { idw.idwbuildersetalgotextbookshepard(state.innerobj, p, null); } public static void idwbuildersetalgotextbookshepard(idwbuilder state, double p, alglib.xparams _params) { idw.idwbuildersetalgotextbookshepard(state.innerobj, p, _params); } /************************************************************************* This function sets IDW model construction algorithm to the 'textbook' modified Shepard's algorithm with user-specified search radius. IMPORTANT: we do NOT recommend using textbook IDW algorithms because they have terrible interpolation properties. Use MSTAB in all cases. INPUT PARAMETERS: State - builder object R - search radius NOTE 1: IDW interpolation can correctly handle ANY dataset, including datasets with non-distinct points. In case non-distinct points are found, an average value for this point will be calculated. -- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwbuildersetalgotextbookmodshepard(idwbuilder state, double r) { idw.idwbuildersetalgotextbookmodshepard(state.innerobj, r, null); } public static void idwbuildersetalgotextbookmodshepard(idwbuilder state, double r, alglib.xparams _params) { idw.idwbuildersetalgotextbookmodshepard(state.innerobj, r, _params); } /************************************************************************* This function sets prior term (model value at infinity) as user-specified value. INPUT PARAMETERS: S - spline builder V - value for user-defined prior NOTE: for vector-valued models all components of the prior are set to same user-specified value -- ALGLIB -- Copyright 29.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwbuildersetuserterm(idwbuilder state, double v) { idw.idwbuildersetuserterm(state.innerobj, v, null); } public static void idwbuildersetuserterm(idwbuilder state, double v, alglib.xparams _params) { idw.idwbuildersetuserterm(state.innerobj, v, _params); } /************************************************************************* This function sets constant prior term (model value at infinity). Constant prior term is determined as mean value over dataset. INPUT PARAMETERS: S - spline builder -- ALGLIB -- Copyright 29.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwbuildersetconstterm(idwbuilder state) { idw.idwbuildersetconstterm(state.innerobj, null); } public static void idwbuildersetconstterm(idwbuilder state, alglib.xparams _params) { idw.idwbuildersetconstterm(state.innerobj, _params); } /************************************************************************* This function sets zero prior term (model value at infinity). 
INPUT PARAMETERS: S - spline builder -- ALGLIB -- Copyright 29.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwbuildersetzeroterm(idwbuilder state) { idw.idwbuildersetzeroterm(state.innerobj, null); } public static void idwbuildersetzeroterm(idwbuilder state, alglib.xparams _params) { idw.idwbuildersetzeroterm(state.innerobj, _params); } /************************************************************************* IDW interpolation: scalar target, 1-dimensional argument NOTE: this function modifies internal temporaries of the IDW model, thus IT IS NOT THREAD-SAFE! If you want to perform parallel model evaluation from the multiple threads, use idwtscalcbuf() with per- thread buffer object. INPUT PARAMETERS: S - IDW interpolant built with IDW builder X0 - argument value Result: IDW interpolant S(X0) -- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static double idwcalc1(idwmodel s, double x0) { return idw.idwcalc1(s.innerobj, x0, null); } public static double idwcalc1(idwmodel s, double x0, alglib.xparams _params) { return idw.idwcalc1(s.innerobj, x0, _params); } /************************************************************************* IDW interpolation: scalar target, 2-dimensional argument NOTE: this function modifies internal temporaries of the IDW model, thus IT IS NOT THREAD-SAFE! If you want to perform parallel model evaluation from the multiple threads, use idwtscalcbuf() with per- thread buffer object. INPUT PARAMETERS: S - IDW interpolant built with IDW builder X0, X1 - argument value Result: IDW interpolant S(X0,X1) -- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static double idwcalc2(idwmodel s, double x0, double x1) { return idw.idwcalc2(s.innerobj, x0, x1, null); } public static double idwcalc2(idwmodel s, double x0, double x1, alglib.xparams _params) { return idw.idwcalc2(s.innerobj, x0, x1, _params); } /************************************************************************* IDW interpolation: scalar target, 3-dimensional argument NOTE: this function modifies internal temporaries of the IDW model, thus IT IS NOT THREAD-SAFE! If you want to perform parallel model evaluation from the multiple threads, use idwtscalcbuf() with per- thread buffer object. INPUT PARAMETERS: S - IDW interpolant built with IDW builder X0,X1,X2- argument value Result: IDW interpolant S(X0,X1,X2) -- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static double idwcalc3(idwmodel s, double x0, double x1, double x2) { return idw.idwcalc3(s.innerobj, x0, x1, x2, null); } public static double idwcalc3(idwmodel s, double x0, double x1, double x2, alglib.xparams _params) { return idw.idwcalc3(s.innerobj, x0, x1, x2, _params); } /************************************************************************* This function calculates values of the IDW model at the given point. This is general function which can be used for arbitrary NX (dimension of the space of arguments) and NY (dimension of the function itself). However when you have NY=1 you may find more convenient to use idwcalc1(), idwcalc2() or idwcalc3(). NOTE: this function modifies internal temporaries of the IDW model, thus IT IS NOT THREAD-SAFE! 
If you want to perform parallel model evaluation from the multiple threads, use idwtscalcbuf() with per- thread buffer object. INPUT PARAMETERS: S - IDW model X - coordinates, array[NX]. X may have more than NX elements, in this case only leading NX will be used. OUTPUT PARAMETERS: Y - function value, array[NY]. Y is out-parameter and will be reallocated after call to this function. In case you want to reuse previously allocated Y, you may use idwcalcbuf(), which reallocates Y only when it is too small. -- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwcalc(idwmodel s, double[] x, out double[] y) { y = new double[0]; idw.idwcalc(s.innerobj, x, ref y, null); } public static void idwcalc(idwmodel s, double[] x, out double[] y, alglib.xparams _params) { y = new double[0]; idw.idwcalc(s.innerobj, x, ref y, _params); } /************************************************************************* This function calculates values of the IDW model at the given point. Same as idwcalc(), but does not reallocate Y when in is large enough to store function values. NOTE: this function modifies internal temporaries of the IDW model, thus IT IS NOT THREAD-SAFE! If you want to perform parallel model evaluation from the multiple threads, use idwtscalcbuf() with per- thread buffer object. INPUT PARAMETERS: S - IDW model X - coordinates, array[NX]. X may have more than NX elements, in this case only leading NX will be used. Y - possibly preallocated array OUTPUT PARAMETERS: Y - function value, array[NY]. Y is not reallocated when it is larger than NY. -- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwcalcbuf(idwmodel s, double[] x, ref double[] y) { idw.idwcalcbuf(s.innerobj, x, ref y, null); } public static void idwcalcbuf(idwmodel s, double[] x, ref double[] y, alglib.xparams _params) { idw.idwcalcbuf(s.innerobj, x, ref y, _params); } /************************************************************************* This function calculates values of the IDW model at the given point, using external buffer object (internal temporaries of IDW model are not modified). This function allows to use same IDW model object in different threads, assuming that different threads use different instances of the buffer structure. INPUT PARAMETERS: S - IDW model, may be shared between different threads Buf - buffer object created for this particular instance of IDW model with idwcreatecalcbuffer(). X - coordinates, array[NX]. X may have more than NX elements, in this case only leading NX will be used. Y - possibly preallocated array OUTPUT PARAMETERS: Y - function value, array[NY]. Y is not reallocated when it is larger than NY. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void idwtscalcbuf(idwmodel s, idwcalcbuffer buf, double[] x, ref double[] y) { idw.idwtscalcbuf(s.innerobj, buf.innerobj, x, ref y, null); } public static void idwtscalcbuf(idwmodel s, idwcalcbuffer buf, double[] x, ref double[] y, alglib.xparams _params) { idw.idwtscalcbuf(s.innerobj, buf.innerobj, x, ref y, _params); } /************************************************************************* This function fits IDW model to the dataset using current IDW construction algorithm. A model being built and fitting report are returned. 
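EXAMPLE (illustrative sketch; the dataset values and search radius are arbitrary): the complete builder pipeline ending in idwfit():

    double[,] xy = new double[,]{{0,0,1}, {1,0,2}, {0,1,3}, {1,1,4}};   // rows are (x0, x1, f)
    alglib.idwbuilder builder;
    alglib.idwmodel model;
    alglib.idwreport rep;
    alglib.idwbuildercreate(2, 1, out builder);      // NX=2, NY=1
    alglib.idwbuildersetpoints(builder, xy);         // N inferred from rows(xy)
    alglib.idwbuildersetalgomstab(builder, 1.0);     // SRad=1.0 for this toy dataset
    alglib.idwfit(builder, out model, out rep);      // rep.rmserror is ~0 for distinct points
    double v = alglib.idwcalc2(model, 0.5, 0.5);     // evaluate the fitted model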
INPUT PARAMETERS: State - builder object OUTPUT PARAMETERS: Model - an IDW model built with current algorithm Rep - model fitting report, fields of this structure contain information about average fitting errors. NOTE: although IDW-MSTAB algorithm is an interpolation method, i.e. it tries to fit the model exactly, it can handle datasets with non- distinct points which can not be fit exactly; in such cases least- squares fitting is performed. -- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwfit(idwbuilder state, out idwmodel model, out idwreport rep) { model = new idwmodel(); rep = new idwreport(); idw.idwfit(state.innerobj, model.innerobj, rep.innerobj, null); } public static void idwfit(idwbuilder state, out idwmodel model, out idwreport rep, alglib.xparams _params) { model = new idwmodel(); rep = new idwreport(); idw.idwfit(state.innerobj, model.innerobj, rep.innerobj, _params); } } public partial class alglib { /************************************************************************* Barycentric interpolant. *************************************************************************/ public class barycentricinterpolant : alglibobject { // // Public declarations // public barycentricinterpolant() { _innerobj = new ratint.barycentricinterpolant(); } public override alglib.alglibobject make_copy() { return new barycentricinterpolant((ratint.barycentricinterpolant)_innerobj.make_copy()); } // // Although some of declarations below are public, you should not use them // They are intended for internal use only // private ratint.barycentricinterpolant _innerobj; public ratint.barycentricinterpolant innerobj { get { return _innerobj; } } public barycentricinterpolant(ratint.barycentricinterpolant obj) { _innerobj = obj; } } /************************************************************************* Rational interpolation using barycentric formula F(t) = SUM(i=0,n-1,w[i]*f[i]/(t-x[i])) / SUM(i=0,n-1,w[i]/(t-x[i])) Input parameters: B - barycentric interpolant built with one of model building subroutines. T - interpolation point Result: barycentric interpolant F(t) -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static double barycentriccalc(barycentricinterpolant b, double t) { return ratint.barycentriccalc(b.innerobj, t, null); } public static double barycentriccalc(barycentricinterpolant b, double t, alglib.xparams _params) { return ratint.barycentriccalc(b.innerobj, t, _params); } /************************************************************************* Differentiation of barycentric interpolant: first derivative. Algorithm used in this subroutine is very robust and should not fail until provided with values too close to MaxRealNumber (usually MaxRealNumber/N or greater will overflow). INPUT PARAMETERS: B - barycentric interpolant built with one of model building subroutines. 
T - interpolation point OUTPUT PARAMETERS: F - barycentric interpolant at T DF - first derivative -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/
public static void barycentricdiff1(barycentricinterpolant b, double t, out double f, out double df) { f = 0; df = 0; ratint.barycentricdiff1(b.innerobj, t, ref f, ref df, null); }
public static void barycentricdiff1(barycentricinterpolant b, double t, out double f, out double df, alglib.xparams _params) { f = 0; df = 0; ratint.barycentricdiff1(b.innerobj, t, ref f, ref df, _params); }
/************************************************************************* Differentiation of barycentric interpolant: first/second derivatives. INPUT PARAMETERS: B - barycentric interpolant built with one of model building subroutines. T - interpolation point OUTPUT PARAMETERS: F - barycentric interpolant at T DF - first derivative D2F - second derivative NOTE: this algorithm may fail due to overflow/underflow if used on data whose values are close to MaxRealNumber or MinRealNumber. Use the more robust BarycentricDiff1() subroutine in such cases. -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/
public static void barycentricdiff2(barycentricinterpolant b, double t, out double f, out double df, out double d2f) { f = 0; df = 0; d2f = 0; ratint.barycentricdiff2(b.innerobj, t, ref f, ref df, ref d2f, null); }
public static void barycentricdiff2(barycentricinterpolant b, double t, out double f, out double df, out double d2f, alglib.xparams _params) { f = 0; df = 0; d2f = 0; ratint.barycentricdiff2(b.innerobj, t, ref f, ref df, ref d2f, _params); }
/************************************************************************* This subroutine performs linear transformation of the argument. INPUT PARAMETERS: B - rational interpolant in barycentric form CA, CB - transformation coefficients: x = CA*t + CB OUTPUT PARAMETERS: B - transformed interpolant with X replaced by T -- ALGLIB PROJECT -- Copyright 19.08.2009 by Bochkanov Sergey *************************************************************************/
public static void barycentriclintransx(barycentricinterpolant b, double ca, double cb) { ratint.barycentriclintransx(b.innerobj, ca, cb, null); }
public static void barycentriclintransx(barycentricinterpolant b, double ca, double cb, alglib.xparams _params) { ratint.barycentriclintransx(b.innerobj, ca, cb, _params); }
/************************************************************************* This subroutine performs linear transformation of the barycentric interpolant.
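EXAMPLE (illustrative sketch; the interpolant b is assumed to be already built, e.g. with barycentricbuildfloaterhormann): rescaling function values as B2(x)=2*B(x)+1:

    alglib.barycentriclintransy(b, 2.0, 1.0);    // CA=2, CB=1
    double f1 = alglib.barycentriccalc(b, 0.5);  // evaluates the transformed interpolant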
INPUT PARAMETERS: B - rational interpolant in barycentric form CA, CB - transformation coefficients: B2(x) = CA*B(x) + CB OUTPUT PARAMETERS: B - transformed interpolant -- ALGLIB PROJECT -- Copyright 19.08.2009 by Bochkanov Sergey *************************************************************************/ public static void barycentriclintransy(barycentricinterpolant b, double ca, double cb) { ratint.barycentriclintransy(b.innerobj, ca, cb, null); } public static void barycentriclintransy(barycentricinterpolant b, double ca, double cb, alglib.xparams _params) { ratint.barycentriclintransy(b.innerobj, ca, cb, _params); } /************************************************************************* Extracts X/Y/W arrays from rational interpolant INPUT PARAMETERS: B - barycentric interpolant OUTPUT PARAMETERS: N - nodes count, N>0 X - interpolation nodes, array[0..N-1] F - function values, array[0..N-1] W - barycentric weights, array[0..N-1] -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void barycentricunpack(barycentricinterpolant b, out int n, out double[] x, out double[] y, out double[] w) { n = 0; x = new double[0]; y = new double[0]; w = new double[0]; ratint.barycentricunpack(b.innerobj, ref n, ref x, ref y, ref w, null); } public static void barycentricunpack(barycentricinterpolant b, out int n, out double[] x, out double[] y, out double[] w, alglib.xparams _params) { n = 0; x = new double[0]; y = new double[0]; w = new double[0]; ratint.barycentricunpack(b.innerobj, ref n, ref x, ref y, ref w, _params); } /************************************************************************* Rational interpolant from X/Y/W arrays F(t) = SUM(i=0,n-1,w[i]*f[i]/(t-x[i])) / SUM(i=0,n-1,w[i]/(t-x[i])) INPUT PARAMETERS: X - interpolation nodes, array[0..N-1] F - function values, array[0..N-1] W - barycentric weights, array[0..N-1] N - nodes count, N>0 OUTPUT PARAMETERS: B - barycentric interpolant built from (X, Y, W) -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void barycentricbuildxyw(double[] x, double[] y, double[] w, int n, out barycentricinterpolant b) { b = new barycentricinterpolant(); ratint.barycentricbuildxyw(x, y, w, n, b.innerobj, null); } public static void barycentricbuildxyw(double[] x, double[] y, double[] w, int n, out barycentricinterpolant b, alglib.xparams _params) { b = new barycentricinterpolant(); ratint.barycentricbuildxyw(x, y, w, n, b.innerobj, _params); } /************************************************************************* Rational interpolant without poles The subroutine constructs the rational interpolating function without real poles (see 'Barycentric rational interpolation with no poles and high rates of approximation', Michael S. Floater. and Kai Hormann, for more information on this subject). Input parameters: X - interpolation nodes, array[0..N-1]. Y - function values, array[0..N-1]. N - number of nodes, N>0. D - order of the interpolation scheme, 0 <= D <= N-1. D<0 will cause an error. D>=N it will be replaced with D=N-1. if you don't know what D to choose, use small value about 3-5. Output parameters: B - barycentric interpolant. Note: this algorithm always succeeds and calculates the weights with close to machine precision. 
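EXAMPLE (illustrative sketch; the sample nodes are arbitrary): building a Floater-Hormann interpolant of order D=3 and evaluating it:

    double[] x = new double[]{0.0, 0.5, 1.0, 1.5, 2.0};
    double[] y = new double[]{0.0, 0.25, 1.0, 2.25, 4.0};      // samples of f(x)=x^2
    alglib.barycentricinterpolant b;
    alglib.barycentricbuildfloaterhormann(x, y, 5, 3, out b);  // N=5 nodes, order D=3
    double v = alglib.barycentriccalc(b, 0.75);                // interpolated value at t=0.75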
-- ALGLIB PROJECT -- Copyright 17.06.2007 by Bochkanov Sergey *************************************************************************/ public static void barycentricbuildfloaterhormann(double[] x, double[] y, int n, int d, out barycentricinterpolant b) { b = new barycentricinterpolant(); ratint.barycentricbuildfloaterhormann(x, y, n, d, b.innerobj, null); } public static void barycentricbuildfloaterhormann(double[] x, double[] y, int n, int d, out barycentricinterpolant b, alglib.xparams _params) { b = new barycentricinterpolant(); ratint.barycentricbuildfloaterhormann(x, y, n, d, b.innerobj, _params); } } public partial class alglib { /************************************************************************* Fits least squares (LS) circle (or NX-dimensional sphere) to data (a set of points in NX-dimensional space). Least squares circle minimizes sum of squared deviations between distances from points to the center and some "candidate" radius, which is also fitted to the data. INPUT PARAMETERS: XY - array[NPoints,NX] (or larger), contains dataset. One row = one point in NX-dimensional space. NPoints - dataset size, NPoints>0 NX - space dimensionality, NX>0 (1, 2, 3, 4, 5 and so on) OUTPUT PARAMETERS: CX - central point for a sphere R - radius -- ALGLIB -- Copyright 07.05.2018 by Bochkanov Sergey *************************************************************************/ public static void fitspherels(double[,] xy, int npoints, int nx, out double[] cx, out double r) { cx = new double[0]; r = 0; fitsphere.fitspherels(xy, npoints, nx, ref cx, ref r, null); } public static void fitspherels(double[,] xy, int npoints, int nx, out double[] cx, out double r, alglib.xparams _params) { cx = new double[0]; r = 0; fitsphere.fitspherels(xy, npoints, nx, ref cx, ref r, _params); } /************************************************************************* Fits minimum circumscribed (MC) circle (or NX-dimensional sphere) to data (a set of points in NX-dimensional space). INPUT PARAMETERS: XY - array[NPoints,NX] (or larger), contains dataset. One row = one point in NX-dimensional space. NPoints - dataset size, NPoints>0 NX - space dimensionality, NX>0 (1, 2, 3, 4, 5 and so on) OUTPUT PARAMETERS: CX - central point for a sphere RHi - radius NOTE: this function is an easy-to-use wrapper around more powerful "expert" function fitspherex(). This wrapper is optimized for ease of use and stability - at the cost of somewhat lower performance (we have to use very tight stopping criteria for inner optimizer because we want to make sure that it will converge on any dataset). If you are ready to experiment with settings of "expert" function, you can achieve ~2-4x speedup over standard "bulletproof" settings. -- ALGLIB -- Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ public static void fitspheremc(double[,] xy, int npoints, int nx, out double[] cx, out double rhi) { cx = new double[0]; rhi = 0; fitsphere.fitspheremc(xy, npoints, nx, ref cx, ref rhi, null); } public static void fitspheremc(double[,] xy, int npoints, int nx, out double[] cx, out double rhi, alglib.xparams _params) { cx = new double[0]; rhi = 0; fitsphere.fitspheremc(xy, npoints, nx, ref cx, ref rhi, _params); } /************************************************************************* Fits maximum inscribed circle (or NX-dimensional sphere) to data (a set of points in NX-dimensional space). INPUT PARAMETERS: XY - array[NPoints,NX] (or larger), contains dataset. 
One row = one point in NX-dimensional space. NPoints - dataset size, NPoints>0 NX - space dimensionality, NX>0 (1, 2, 3, 4, 5 and so on) OUTPUT PARAMETERS: CX - central point for a sphere RLo - radius NOTE: this function is an easy-to-use wrapper around more powerful "expert" function fitspherex(). This wrapper is optimized for ease of use and stability - at the cost of somewhat lower performance (we have to use very tight stopping criteria for inner optimizer because we want to make sure that it will converge on any dataset). If you are ready to experiment with settings of "expert" function, you can achieve ~2-4x speedup over standard "bulletproof" settings. -- ALGLIB -- Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/
public static void fitspheremi(double[,] xy, int npoints, int nx, out double[] cx, out double rlo) { cx = new double[0]; rlo = 0; fitsphere.fitspheremi(xy, npoints, nx, ref cx, ref rlo, null); }
public static void fitspheremi(double[,] xy, int npoints, int nx, out double[] cx, out double rlo, alglib.xparams _params) { cx = new double[0]; rlo = 0; fitsphere.fitspheremi(xy, npoints, nx, ref cx, ref rlo, _params); }
/************************************************************************* Fits minimum zone circle (or NX-dimensional sphere) to data (a set of points in NX-dimensional space). INPUT PARAMETERS: XY - array[NPoints,NX] (or larger), contains dataset. One row = one point in NX-dimensional space. NPoints - dataset size, NPoints>0 NX - space dimensionality, NX>0 (1, 2, 3, 4, 5 and so on) OUTPUT PARAMETERS: CX - central point for a sphere RLo - radius of inscribed circle RHi - radius of circumscribed circle NOTE: this function is an easy-to-use wrapper around more powerful "expert" function fitspherex(). This wrapper is optimized for ease of use and stability - at the cost of somewhat lower performance (we have to use very tight stopping criteria for inner optimizer because we want to make sure that it will converge on any dataset). If you are ready to experiment with settings of "expert" function, you can achieve ~2-4x speedup over standard "bulletproof" settings. -- ALGLIB -- Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/
public static void fitspheremz(double[,] xy, int npoints, int nx, out double[] cx, out double rlo, out double rhi) { cx = new double[0]; rlo = 0; rhi = 0; fitsphere.fitspheremz(xy, npoints, nx, ref cx, ref rlo, ref rhi, null); }
public static void fitspheremz(double[,] xy, int npoints, int nx, out double[] cx, out double rlo, out double rhi, alglib.xparams _params) { cx = new double[0]; rlo = 0; rhi = 0; fitsphere.fitspheremz(xy, npoints, nx, ref cx, ref rlo, ref rhi, _params); }
/************************************************************************* Fitting minimum circumscribed, maximum inscribed or minimum zone circles (or NX-dimensional spheres) to data (a set of points in NX-dimensional space). This is an expert function which allows you to tweak many parameters of the underlying nonlinear solver: * stopping criteria for inner iterations * number of outer iterations * penalty coefficient used to handle nonlinear constraints (we convert an unconstrained nonsmooth optimization problem involving max() and/or min() operations to a quadratically constrained smooth one).
You may tweak all these parameters or only some of them, leaving other ones at their default state - just specify zero value, and solver will fill it with appropriate default one. These comments also include some discussion of approach used to handle such unusual fitting problem, its stability, drawbacks of alternative methods, and convergence properties. INPUT PARAMETERS: XY - array[NPoints,NX] (or larger), contains dataset. One row = one point in NX-dimensional space. NPoints - dataset size, NPoints>0 NX - space dimensionality, NX>0 (1, 2, 3, 4, 5 and so on) ProblemType-used to encode problem type: * 0 for least squares circle * 1 for minimum circumscribed circle/sphere fitting (MC) * 2 for maximum inscribed circle/sphere fitting (MI) * 3 for minimum zone circle fitting (difference between Rhi and Rlo is minimized), denoted as MZ EpsX - stopping condition for NLC optimizer: * must be non-negative * use 0 to choose default value (1.0E-12 is used by default) * you may specify larger values, up to 1.0E-6, if you want to speed-up solver; NLC solver performs several preconditioned outer iterations, so final result typically has precision much better than EpsX. AULIts - number of outer iterations performed by NLC optimizer: * must be non-negative * use 0 to choose default value (20 is used by default) * you may specify values smaller than 20 if you want to speed up solver; 10 often results in good combination of precision and speed; sometimes you may get good results with just 6 outer iterations. Ignored for ProblemType=0. Penalty - penalty coefficient for NLC optimizer: * must be non-negative * use 0 to choose default value (1.0E6 in current version) * it should be really large, 1.0E6...1.0E7 is a good value to start from; * generally, default value is good enough Ignored for ProblemType=0. OUTPUT PARAMETERS: CX - central point for a sphere RLo - radius: * for ProblemType=2,3, radius of the inscribed sphere * for ProblemType=0 - radius of the least squares sphere * for ProblemType=1 - zero RHo - radius: * for ProblemType=1,3, radius of the circumscribed sphere * for ProblemType=0 - radius of the least squares sphere * for ProblemType=2 - zero NOTE: ON THE UNIQUENESS OF SOLUTIONS ALGLIB provides solution to several related circle fitting problems: MC (minimum circumscribed), MI (maximum inscribed) and MZ (minimum zone) fitting, LS (least squares) fitting. It is important to note that among these problems only MC and LS are convex and have unique solution independently from starting point. As for MI, it may (or may not, depending on dataset properties) have multiple solutions, and it always has one degenerate solution C=infinity which corresponds to infinitely large radius. Thus, there are no guarantees that solution to MI returned by this solver will be the best one (and no one can provide you with such guarantee because problem is NP-hard). The only guarantee you have is that this solution is locally optimal, i.e. it can not be improved by infinitesimally small tweaks in the parameters. It is also possible to "run away" to infinity when started from bad initial point located outside of point cloud (or when point cloud does not span entire circumference/surface of the sphere). Finally, MZ (minimum zone circle) stands somewhere between MC and MI in stability. It is somewhat regularized by "circumscribed" term of the merit function; however, solutions to MZ may be non-unique, and in some unlucky cases it is also possible to "run away to infinity". 
NOTE: ON THE NONLINEARLY CONSTRAINED PROGRAMMING APPROACH The problem formulation for MC (minimum circumscribed circle; for the sake of simplicity we omit MZ and MI here) is: min_C ( max_i |XY[i]-C|^2 ) i.e. it is an unconstrained nonsmooth optimization problem of finding the "best" central point, with radius R being unambiguously determined from C. In order to move away from non-smoothness we use the following reformulation: min_{C,R} R subject to R>=0 and |XY[i]-C|^2 <= R^2 for every i, i.e. it becomes a smooth quadratically constrained optimization problem with a linear target function. Such problem statement is 100% equivalent to the original nonsmooth one, but much easier to approach. We solve it with MinNLC solver provided by ALGLIB. NOTE: ON INSTABILITY OF SEQUENTIAL LINEARIZATION APPROACH ALGLIB has nonlinearly constrained solver which proved to be stable on such problems. However, some authors proposed to linearize constraints in the vicinity of current approximation (Ci,Ri) and to get next approximate solution (Ci+1,Ri+1) as solution to linear programming problem. Obviously, LP problems are easier than nonlinearly constrained ones. Indeed, such approach to MC/MI/MZ resulted in ~10-20x increase in performance (when compared with NLC solver). However, it turned out that in some cases linearized model fails to predict correct direction for next step and tells us that we converged to solution even when we are still 2-4 digits of precision away from it. It is important that it is not failure of LP solver - it is failure of the linear model; even when solved exactly, it fails to handle subtle nonlinearities which arise near the solution. We validated it by comparing results returned by ALGLIB linear solver with that of MATLAB. In our experiments with linearization: * MC failed most often, at both realistic and synthetic datasets * MI sometimes failed, but sometimes succeeded * MZ often succeeded; our guess is that presence of two independent sets of constraints (one set for Rlo and another one for Rhi) and two terms in the target function (Rlo and Rhi) regularizes task, so when linear model fails to handle nonlinearities from Rlo, it uses Rhi as a hint (and vice versa). Because linearization approach failed to achieve stable results, we do not include it in ALGLIB.
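EXAMPLE (illustrative sketch; the coordinates are arbitrary): calling the expert function for minimum circumscribed (MC) fitting with default solver settings (zero values request the defaults described above):

    double[,] xy = new double[,]{{1,0}, {0,1}, {-1,0}, {0,-1}};   // four points on the unit circle
    double[] cx;
    double rlo, rhi;
    alglib.fitspherex(xy, 4, 2, 1, 0.0, 0, 0.0, out cx, out rlo, out rhi);
    // ProblemType=1 (MC): rhi is the circumscribed radius, rlo is reported as zero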
-- ALGLIB -- Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ public static void fitspherex(double[,] xy, int npoints, int nx, int problemtype, double epsx, int aulits, double penalty, out double[] cx, out double rlo, out double rhi) { cx = new double[0]; rlo = 0; rhi = 0; fitsphere.fitspherex(xy, npoints, nx, problemtype, epsx, aulits, penalty, ref cx, ref rlo, ref rhi, null); } public static void fitspherex(double[,] xy, int npoints, int nx, int problemtype, double epsx, int aulits, double penalty, out double[] cx, out double rlo, out double rhi, alglib.xparams _params) { cx = new double[0]; rlo = 0; rhi = 0; fitsphere.fitspherex(xy, npoints, nx, problemtype, epsx, aulits, penalty, ref cx, ref rlo, ref rhi, _params); } } public partial class alglib { } public partial class alglib { /************************************************************************* 1-dimensional spline interpolant *************************************************************************/ public class spline1dinterpolant : alglibobject { // // Public declarations // public spline1dinterpolant() { _innerobj = new spline1d.spline1dinterpolant(); } public override alglib.alglibobject make_copy() { return new spline1dinterpolant((spline1d.spline1dinterpolant)_innerobj.make_copy()); } // // Although some of declarations below are public, you should not use them // They are intended for internal use only // private spline1d.spline1dinterpolant _innerobj; public spline1d.spline1dinterpolant innerobj { get { return _innerobj; } } public spline1dinterpolant(spline1d.spline1dinterpolant obj) { _innerobj = obj; } } /************************************************************************* Spline fitting report: RMSError RMS error AvgError average error AvgRelError average relative error (for non-zero Y[I]) MaxError maximum error Fields below are filled by obsolete functions (Spline1DFitCubic, Spline1DFitHermite). 
Modern fitting functions do NOT fill these fields: TaskRCond reciprocal of task's condition number *************************************************************************/ public class spline1dfitreport : alglibobject { // // Public declarations // public double taskrcond { get { return _innerobj.taskrcond; } set { _innerobj.taskrcond = value; } } public double rmserror { get { return _innerobj.rmserror; } set { _innerobj.rmserror = value; } } public double avgerror { get { return _innerobj.avgerror; } set { _innerobj.avgerror = value; } } public double avgrelerror { get { return _innerobj.avgrelerror; } set { _innerobj.avgrelerror = value; } } public double maxerror { get { return _innerobj.maxerror; } set { _innerobj.maxerror = value; } } public spline1dfitreport() { _innerobj = new spline1d.spline1dfitreport(); } public override alglib.alglibobject make_copy() { return new spline1dfitreport((spline1d.spline1dfitreport)_innerobj.make_copy()); } // // Although some of declarations below are public, you should not use them // They are intended for internal use only // private spline1d.spline1dfitreport _innerobj; public spline1d.spline1dfitreport innerobj { get { return _innerobj; } } public spline1dfitreport(spline1d.spline1dfitreport obj) { _innerobj = obj; } } /************************************************************************* This subroutine builds linear spline interpolant INPUT PARAMETERS: X - spline nodes, array[0..N-1] Y - function values, array[0..N-1] N - points count (optional): * N>=2 * if given, only first N points are used to build spline * if not given, automatically detected from X/Y sizes (len(X) must be equal to len(Y)) OUTPUT PARAMETERS: C - spline interpolant ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array. -- ALGLIB PROJECT -- Copyright 24.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline1dbuildlinear(double[] x, double[] y, int n, out spline1dinterpolant c) { c = new spline1dinterpolant(); spline1d.spline1dbuildlinear(x, y, n, c.innerobj, null); } public static void spline1dbuildlinear(double[] x, double[] y, int n, out spline1dinterpolant c, alglib.xparams _params) { c = new spline1dinterpolant(); spline1d.spline1dbuildlinear(x, y, n, c.innerobj, _params); } public static void spline1dbuildlinear(double[] x, double[] y, out spline1dinterpolant c) { int n; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dbuildlinear': looks like one of arguments has wrong size"); c = new spline1dinterpolant(); n = ap.len(x); spline1d.spline1dbuildlinear(x, y, n, c.innerobj, null); return; } public static void spline1dbuildlinear(double[] x, double[] y, out spline1dinterpolant c, alglib.xparams _params) { int n; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dbuildlinear': looks like one of arguments has wrong size"); c = new spline1dinterpolant(); n = ap.len(x); spline1d.spline1dbuildlinear(x, y, n, c.innerobj, _params); return; } /************************************************************************* This subroutine builds cubic spline interpolant. INPUT PARAMETERS: X - spline nodes, array[0..N-1]. Y - function values, array[0..N-1]. 
OPTIONAL PARAMETERS: N - points count: * N>=2 * if given, only first N points are used to build spline * if not given, automatically detected from X/Y sizes (len(X) must be equal to len(Y)) BoundLType - boundary condition type for the left boundary BoundL - left boundary condition (first or second derivative, depending on the BoundLType) BoundRType - boundary condition type for the right boundary BoundR - right boundary condition (first or second derivative, depending on the BoundRType) OUTPUT PARAMETERS: C - spline interpolant ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array. SETTING BOUNDARY VALUES: The BoundLType/BoundRType parameters can have the following values: * -1, which corresonds to the periodic (cyclic) boundary conditions. In this case: * both BoundLType and BoundRType must be equal to -1. * BoundL/BoundR are ignored * Y[last] is ignored (it is assumed to be equal to Y[first]). * 0, which corresponds to the parabolically terminated spline (BoundL and/or BoundR are ignored). * 1, which corresponds to the first derivative boundary condition * 2, which corresponds to the second derivative boundary condition * by default, BoundType=0 is used PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. However, this subroutine doesn't require you to specify equal values for the first and last points - it automatically forces them to be equal by copying Y[first_point] (corresponds to the leftmost, minimal X[]) to Y[last_point]. However it is recommended to pass consistent values of Y[], i.e. to make Y[first_point]=Y[last_point]. -- ALGLIB PROJECT -- Copyright 23.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline1dbuildcubic(double[] x, double[] y, int n, int boundltype, double boundl, int boundrtype, double boundr, out spline1dinterpolant c) { c = new spline1dinterpolant(); spline1d.spline1dbuildcubic(x, y, n, boundltype, boundl, boundrtype, boundr, c.innerobj, null); } public static void spline1dbuildcubic(double[] x, double[] y, int n, int boundltype, double boundl, int boundrtype, double boundr, out spline1dinterpolant c, alglib.xparams _params) { c = new spline1dinterpolant(); spline1d.spline1dbuildcubic(x, y, n, boundltype, boundl, boundrtype, boundr, c.innerobj, _params); } public static void spline1dbuildcubic(double[] x, double[] y, out spline1dinterpolant c) { int n; int boundltype; double boundl; int boundrtype; double boundr; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dbuildcubic': looks like one of arguments has wrong size"); c = new spline1dinterpolant(); n = ap.len(x); boundltype = 0; boundl = 0; boundrtype = 0; boundr = 0; spline1d.spline1dbuildcubic(x, y, n, boundltype, boundl, boundrtype, boundr, c.innerobj, null); return; } public static void spline1dbuildcubic(double[] x, double[] y, out spline1dinterpolant c, alglib.xparams _params) { int n; int boundltype; double boundl; int boundrtype; double boundr; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dbuildcubic': looks like one of arguments has wrong size"); c = new spline1dinterpolant(); n = ap.len(x); boundltype = 0; boundl = 0; boundrtype = 0; boundr = 0; spline1d.spline1dbuildcubic(x, y, n, boundltype, boundl, boundrtype, boundr, c.innerobj, _params); return; } /************************************************************************* This function solves 
following problem: given table y[] of function values at nodes x[], it calculates and returns table of function derivatives d[] (calculated at the same nodes x[]). This function yields same result as Spline1DBuildCubic() call followed by sequence of Spline1DDiff() calls, but it can be several times faster when called for ordered X[] and X2[]. INPUT PARAMETERS: X - spline nodes Y - function values OPTIONAL PARAMETERS: N - points count: * N>=2 * if given, only first N points are used * if not given, automatically detected from X/Y sizes (len(X) must be equal to len(Y)) BoundLType - boundary condition type for the left boundary BoundL - left boundary condition (first or second derivative, depending on the BoundLType) BoundRType - boundary condition type for the right boundary BoundR - right boundary condition (first or second derivative, depending on the BoundRType) OUTPUT PARAMETERS: D - derivative values at X[] ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array. Derivative values are correctly reordered on return, so D[I] is always equal to S'(X[I]) independently of points order. SETTING BOUNDARY VALUES: The BoundLType/BoundRType parameters can have the following values: * -1, which corresonds to the periodic (cyclic) boundary conditions. In this case: * both BoundLType and BoundRType must be equal to -1. * BoundL/BoundR are ignored * Y[last] is ignored (it is assumed to be equal to Y[first]). * 0, which corresponds to the parabolically terminated spline (BoundL and/or BoundR are ignored). * 1, which corresponds to the first derivative boundary condition * 2, which corresponds to the second derivative boundary condition * by default, BoundType=0 is used PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. However, this subroutine doesn't require you to specify equal values for the first and last points - it automatically forces them to be equal by copying Y[first_point] (corresponds to the leftmost, minimal X[]) to Y[last_point]. However it is recommended to pass consistent values of Y[], i.e. to make Y[first_point]=Y[last_point]. 
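USAGE SKETCH: a minimal call sequence with illustrative data (the arrays below are examples only; the short overload shown relies on the default parabolically terminated boundary conditions, BoundLType=BoundRType=0):

    double[] x = new double[]{0.0, 1.0, 2.0, 3.0};   // nodes, may be passed unsorted
    double[] y = new double[]{0.0, 1.0, 4.0, 9.0};   // function values at the nodes
    double[] d;
    alglib.spline1dgriddiffcubic(x, y, out d);
    // d[i] now approximates S'(x[i]) of the cubic spline S built on (x,y)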
-- ALGLIB PROJECT -- Copyright 03.09.2010 by Bochkanov Sergey *************************************************************************/ public static void spline1dgriddiffcubic(double[] x, double[] y, int n, int boundltype, double boundl, int boundrtype, double boundr, out double[] d) { d = new double[0]; spline1d.spline1dgriddiffcubic(x, y, n, boundltype, boundl, boundrtype, boundr, ref d, null); } public static void spline1dgriddiffcubic(double[] x, double[] y, int n, int boundltype, double boundl, int boundrtype, double boundr, out double[] d, alglib.xparams _params) { d = new double[0]; spline1d.spline1dgriddiffcubic(x, y, n, boundltype, boundl, boundrtype, boundr, ref d, _params); } public static void spline1dgriddiffcubic(double[] x, double[] y, out double[] d) { int n; int boundltype; double boundl; int boundrtype; double boundr; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dgriddiffcubic': looks like one of arguments has wrong size"); d = new double[0]; n = ap.len(x); boundltype = 0; boundl = 0; boundrtype = 0; boundr = 0; spline1d.spline1dgriddiffcubic(x, y, n, boundltype, boundl, boundrtype, boundr, ref d, null); return; } public static void spline1dgriddiffcubic(double[] x, double[] y, out double[] d, alglib.xparams _params) { int n; int boundltype; double boundl; int boundrtype; double boundr; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dgriddiffcubic': looks like one of arguments has wrong size"); d = new double[0]; n = ap.len(x); boundltype = 0; boundl = 0; boundrtype = 0; boundr = 0; spline1d.spline1dgriddiffcubic(x, y, n, boundltype, boundl, boundrtype, boundr, ref d, _params); return; } /************************************************************************* This function solves following problem: given table y[] of function values at nodes x[], it calculates and returns tables of first and second function derivatives d1[] and d2[] (calculated at the same nodes x[]). This function yields same result as Spline1DBuildCubic() call followed by sequence of Spline1DDiff() calls, but it can be several times faster when called for ordered X[] and X2[]. INPUT PARAMETERS: X - spline nodes Y - function values OPTIONAL PARAMETERS: N - points count: * N>=2 * if given, only first N points are used * if not given, automatically detected from X/Y sizes (len(X) must be equal to len(Y)) BoundLType - boundary condition type for the left boundary BoundL - left boundary condition (first or second derivative, depending on the BoundLType) BoundRType - boundary condition type for the right boundary BoundR - right boundary condition (first or second derivative, depending on the BoundRType) OUTPUT PARAMETERS: D1 - S' values at X[] D2 - S'' values at X[] ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array. Derivative values are correctly reordered on return, so D[I] is always equal to S'(X[I]) independently of points order. SETTING BOUNDARY VALUES: The BoundLType/BoundRType parameters can have the following values: * -1, which corresonds to the periodic (cyclic) boundary conditions. In this case: * both BoundLType and BoundRType must be equal to -1. * BoundL/BoundR are ignored * Y[last] is ignored (it is assumed to be equal to Y[first]). * 0, which corresponds to the parabolically terminated spline (BoundL and/or BoundR are ignored). 
* 1, which corresponds to the first derivative boundary condition * 2, which corresponds to the second derivative boundary condition * by default, BoundType=0 is used PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. However, this subroutine doesn't require you to specify equal values for the first and last points - it automatically forces them to be equal by copying Y[first_point] (corresponds to the leftmost, minimal X[]) to Y[last_point]. However it is recommended to pass consistent values of Y[], i.e. to make Y[first_point]=Y[last_point]. -- ALGLIB PROJECT -- Copyright 03.09.2010 by Bochkanov Sergey *************************************************************************/ public static void spline1dgriddiff2cubic(double[] x, double[] y, int n, int boundltype, double boundl, int boundrtype, double boundr, out double[] d1, out double[] d2) { d1 = new double[0]; d2 = new double[0]; spline1d.spline1dgriddiff2cubic(x, y, n, boundltype, boundl, boundrtype, boundr, ref d1, ref d2, null); } public static void spline1dgriddiff2cubic(double[] x, double[] y, int n, int boundltype, double boundl, int boundrtype, double boundr, out double[] d1, out double[] d2, alglib.xparams _params) { d1 = new double[0]; d2 = new double[0]; spline1d.spline1dgriddiff2cubic(x, y, n, boundltype, boundl, boundrtype, boundr, ref d1, ref d2, _params); } public static void spline1dgriddiff2cubic(double[] x, double[] y, out double[] d1, out double[] d2) { int n; int boundltype; double boundl; int boundrtype; double boundr; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dgriddiff2cubic': looks like one of arguments has wrong size"); d1 = new double[0]; d2 = new double[0]; n = ap.len(x); boundltype = 0; boundl = 0; boundrtype = 0; boundr = 0; spline1d.spline1dgriddiff2cubic(x, y, n, boundltype, boundl, boundrtype, boundr, ref d1, ref d2, null); return; } public static void spline1dgriddiff2cubic(double[] x, double[] y, out double[] d1, out double[] d2, alglib.xparams _params) { int n; int boundltype; double boundl; int boundrtype; double boundr; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dgriddiff2cubic': looks like one of arguments has wrong size"); d1 = new double[0]; d2 = new double[0]; n = ap.len(x); boundltype = 0; boundl = 0; boundrtype = 0; boundr = 0; spline1d.spline1dgriddiff2cubic(x, y, n, boundltype, boundl, boundrtype, boundr, ref d1, ref d2, _params); return; } /************************************************************************* This function solves following problem: given table y[] of function values at old nodes x[] and new nodes x2[], it calculates and returns table of function values y2[] (calculated at x2[]). This function yields same result as Spline1DBuildCubic() call followed by sequence of Spline1DDiff() calls, but it can be several times faster when called for ordered X[] and X2[]. 
INPUT PARAMETERS: X - old spline nodes Y - function values X2 - new spline nodes OPTIONAL PARAMETERS: N - points count: * N>=2 * if given, only first N points from X/Y are used * if not given, automatically detected from X/Y sizes (len(X) must be equal to len(Y)) BoundLType - boundary condition type for the left boundary BoundL - left boundary condition (first or second derivative, depending on the BoundLType) BoundRType - boundary condition type for the right boundary BoundR - right boundary condition (first or second derivative, depending on the BoundRType) N2 - new points count: * N2>=2 * if given, only first N2 points from X2 are used * if not given, automatically detected from X2 size OUTPUT PARAMETERS: F2 - function values at X2[] ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array. Function values are correctly reordered on return, so F2[I] is always equal to S(X2[I]) independently of points order. SETTING BOUNDARY VALUES: The BoundLType/BoundRType parameters can have the following values: * -1, which corresonds to the periodic (cyclic) boundary conditions. In this case: * both BoundLType and BoundRType must be equal to -1. * BoundL/BoundR are ignored * Y[last] is ignored (it is assumed to be equal to Y[first]). * 0, which corresponds to the parabolically terminated spline (BoundL and/or BoundR are ignored). * 1, which corresponds to the first derivative boundary condition * 2, which corresponds to the second derivative boundary condition * by default, BoundType=0 is used PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. However, this subroutine doesn't require you to specify equal values for the first and last points - it automatically forces them to be equal by copying Y[first_point] (corresponds to the leftmost, minimal X[]) to Y[last_point]. However it is recommended to pass consistent values of Y[], i.e. to make Y[first_point]=Y[last_point]. 
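USAGE SKETCH: an illustrative call of the short overload (default parabolically terminated boundary conditions; the data values are arbitrary):

    double[] x  = new double[]{0.0, 1.0, 2.0, 3.0};   // old nodes
    double[] y  = new double[]{0.0, 1.0, 4.0, 9.0};   // values at old nodes
    double[] x2 = new double[]{0.5, 1.5, 2.5};        // new nodes
    double[] y2;
    alglib.spline1dconvcubic(x, y, x2, out y2);       // y2[i] = S(x2[i])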
-- ALGLIB PROJECT -- Copyright 03.09.2010 by Bochkanov Sergey *************************************************************************/ public static void spline1dconvcubic(double[] x, double[] y, int n, int boundltype, double boundl, int boundrtype, double boundr, double[] x2, int n2, out double[] y2) { y2 = new double[0]; spline1d.spline1dconvcubic(x, y, n, boundltype, boundl, boundrtype, boundr, x2, n2, ref y2, null); } public static void spline1dconvcubic(double[] x, double[] y, int n, int boundltype, double boundl, int boundrtype, double boundr, double[] x2, int n2, out double[] y2, alglib.xparams _params) { y2 = new double[0]; spline1d.spline1dconvcubic(x, y, n, boundltype, boundl, boundrtype, boundr, x2, n2, ref y2, _params); } public static void spline1dconvcubic(double[] x, double[] y, double[] x2, out double[] y2) { int n; int boundltype; double boundl; int boundrtype; double boundr; int n2; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dconvcubic': looks like one of arguments has wrong size"); y2 = new double[0]; n = ap.len(x); boundltype = 0; boundl = 0; boundrtype = 0; boundr = 0; n2 = ap.len(x2); spline1d.spline1dconvcubic(x, y, n, boundltype, boundl, boundrtype, boundr, x2, n2, ref y2, null); return; } public static void spline1dconvcubic(double[] x, double[] y, double[] x2, out double[] y2, alglib.xparams _params) { int n; int boundltype; double boundl; int boundrtype; double boundr; int n2; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dconvcubic': looks like one of arguments has wrong size"); y2 = new double[0]; n = ap.len(x); boundltype = 0; boundl = 0; boundrtype = 0; boundr = 0; n2 = ap.len(x2); spline1d.spline1dconvcubic(x, y, n, boundltype, boundl, boundrtype, boundr, x2, n2, ref y2, _params); return; } /************************************************************************* This function solves following problem: given table y[] of function values at old nodes x[] and new nodes x2[], it calculates and returns table of function values y2[] and derivatives d2[] (calculated at x2[]). This function yields same result as Spline1DBuildCubic() call followed by sequence of Spline1DDiff() calls, but it can be several times faster when called for ordered X[] and X2[]. INPUT PARAMETERS: X - old spline nodes Y - function values X2 - new spline nodes OPTIONAL PARAMETERS: N - points count: * N>=2 * if given, only first N points from X/Y are used * if not given, automatically detected from X/Y sizes (len(X) must be equal to len(Y)) BoundLType - boundary condition type for the left boundary BoundL - left boundary condition (first or second derivative, depending on the BoundLType) BoundRType - boundary condition type for the right boundary BoundR - right boundary condition (first or second derivative, depending on the BoundRType) N2 - new points count: * N2>=2 * if given, only first N2 points from X2 are used * if not given, automatically detected from X2 size OUTPUT PARAMETERS: F2 - function values at X2[] D2 - first derivatives at X2[] ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array. Function values are correctly reordered on return, so F2[I] is always equal to S(X2[I]) independently of points order. SETTING BOUNDARY VALUES: The BoundLType/BoundRType parameters can have the following values: * -1, which corresonds to the periodic (cyclic) boundary conditions. In this case: * both BoundLType and BoundRType must be equal to -1. 
* BoundL/BoundR are ignored * Y[last] is ignored (it is assumed to be equal to Y[first]). * 0, which corresponds to the parabolically terminated spline (BoundL and/or BoundR are ignored). * 1, which corresponds to the first derivative boundary condition * 2, which corresponds to the second derivative boundary condition * by default, BoundType=0 is used PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. However, this subroutine doesn't require you to specify equal values for the first and last points - it automatically forces them to be equal by copying Y[first_point] (corresponds to the leftmost, minimal X[]) to Y[last_point]. However it is recommended to pass consistent values of Y[], i.e. to make Y[first_point]=Y[last_point]. -- ALGLIB PROJECT -- Copyright 03.09.2010 by Bochkanov Sergey *************************************************************************/ public static void spline1dconvdiffcubic(double[] x, double[] y, int n, int boundltype, double boundl, int boundrtype, double boundr, double[] x2, int n2, out double[] y2, out double[] d2) { y2 = new double[0]; d2 = new double[0]; spline1d.spline1dconvdiffcubic(x, y, n, boundltype, boundl, boundrtype, boundr, x2, n2, ref y2, ref d2, null); } public static void spline1dconvdiffcubic(double[] x, double[] y, int n, int boundltype, double boundl, int boundrtype, double boundr, double[] x2, int n2, out double[] y2, out double[] d2, alglib.xparams _params) { y2 = new double[0]; d2 = new double[0]; spline1d.spline1dconvdiffcubic(x, y, n, boundltype, boundl, boundrtype, boundr, x2, n2, ref y2, ref d2, _params); } public static void spline1dconvdiffcubic(double[] x, double[] y, double[] x2, out double[] y2, out double[] d2) { int n; int boundltype; double boundl; int boundrtype; double boundr; int n2; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dconvdiffcubic': looks like one of arguments has wrong size"); y2 = new double[0]; d2 = new double[0]; n = ap.len(x); boundltype = 0; boundl = 0; boundrtype = 0; boundr = 0; n2 = ap.len(x2); spline1d.spline1dconvdiffcubic(x, y, n, boundltype, boundl, boundrtype, boundr, x2, n2, ref y2, ref d2, null); return; } public static void spline1dconvdiffcubic(double[] x, double[] y, double[] x2, out double[] y2, out double[] d2, alglib.xparams _params) { int n; int boundltype; double boundl; int boundrtype; double boundr; int n2; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dconvdiffcubic': looks like one of arguments has wrong size"); y2 = new double[0]; d2 = new double[0]; n = ap.len(x); boundltype = 0; boundl = 0; boundrtype = 0; boundr = 0; n2 = ap.len(x2); spline1d.spline1dconvdiffcubic(x, y, n, boundltype, boundl, boundrtype, boundr, x2, n2, ref y2, ref d2, _params); return; } /************************************************************************* This function solves following problem: given table y[] of function values at old nodes x[] and new nodes x2[], it calculates and returns table of function values y2[], first and second derivatives d2[] and dd2[] (calculated at x2[]). This function yields same result as Spline1DBuildCubic() call followed by sequence of Spline1DDiff() calls, but it can be several times faster when called for ordered X[] and X2[]. 
INPUT PARAMETERS: X - old spline nodes Y - function values X2 - new spline nodes OPTIONAL PARAMETERS: N - points count: * N>=2 * if given, only first N points from X/Y are used * if not given, automatically detected from X/Y sizes (len(X) must be equal to len(Y)) BoundLType - boundary condition type for the left boundary BoundL - left boundary condition (first or second derivative, depending on the BoundLType) BoundRType - boundary condition type for the right boundary BoundR - right boundary condition (first or second derivative, depending on the BoundRType) N2 - new points count: * N2>=2 * if given, only first N2 points from X2 are used * if not given, automatically detected from X2 size OUTPUT PARAMETERS: F2 - function values at X2[] D2 - first derivatives at X2[] DD2 - second derivatives at X2[] ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array. Function values are correctly reordered on return, so F2[I] is always equal to S(X2[I]) independently of points order. SETTING BOUNDARY VALUES: The BoundLType/BoundRType parameters can have the following values: * -1, which corresonds to the periodic (cyclic) boundary conditions. In this case: * both BoundLType and BoundRType must be equal to -1. * BoundL/BoundR are ignored * Y[last] is ignored (it is assumed to be equal to Y[first]). * 0, which corresponds to the parabolically terminated spline (BoundL and/or BoundR are ignored). * 1, which corresponds to the first derivative boundary condition * 2, which corresponds to the second derivative boundary condition * by default, BoundType=0 is used PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. However, this subroutine doesn't require you to specify equal values for the first and last points - it automatically forces them to be equal by copying Y[first_point] (corresponds to the leftmost, minimal X[]) to Y[last_point]. However it is recommended to pass consistent values of Y[], i.e. to make Y[first_point]=Y[last_point]. 
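USAGE SKETCH: an illustrative call of the short overload (default boundary conditions; data values are arbitrary):

    double[] x  = new double[]{0.0, 1.0, 2.0, 3.0};
    double[] y  = new double[]{0.0, 1.0, 4.0, 9.0};
    double[] x2 = new double[]{0.5, 1.5, 2.5};
    double[] y2, d2, dd2;
    alglib.spline1dconvdiff2cubic(x, y, x2, out y2, out d2, out dd2);
    // y2[i]=S(x2[i]), d2[i]=S'(x2[i]), dd2[i]=S''(x2[i])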
-- ALGLIB PROJECT -- Copyright 03.09.2010 by Bochkanov Sergey *************************************************************************/ public static void spline1dconvdiff2cubic(double[] x, double[] y, int n, int boundltype, double boundl, int boundrtype, double boundr, double[] x2, int n2, out double[] y2, out double[] d2, out double[] dd2) { y2 = new double[0]; d2 = new double[0]; dd2 = new double[0]; spline1d.spline1dconvdiff2cubic(x, y, n, boundltype, boundl, boundrtype, boundr, x2, n2, ref y2, ref d2, ref dd2, null); } public static void spline1dconvdiff2cubic(double[] x, double[] y, int n, int boundltype, double boundl, int boundrtype, double boundr, double[] x2, int n2, out double[] y2, out double[] d2, out double[] dd2, alglib.xparams _params) { y2 = new double[0]; d2 = new double[0]; dd2 = new double[0]; spline1d.spline1dconvdiff2cubic(x, y, n, boundltype, boundl, boundrtype, boundr, x2, n2, ref y2, ref d2, ref dd2, _params); } public static void spline1dconvdiff2cubic(double[] x, double[] y, double[] x2, out double[] y2, out double[] d2, out double[] dd2) { int n; int boundltype; double boundl; int boundrtype; double boundr; int n2; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dconvdiff2cubic': looks like one of arguments has wrong size"); y2 = new double[0]; d2 = new double[0]; dd2 = new double[0]; n = ap.len(x); boundltype = 0; boundl = 0; boundrtype = 0; boundr = 0; n2 = ap.len(x2); spline1d.spline1dconvdiff2cubic(x, y, n, boundltype, boundl, boundrtype, boundr, x2, n2, ref y2, ref d2, ref dd2, null); return; } public static void spline1dconvdiff2cubic(double[] x, double[] y, double[] x2, out double[] y2, out double[] d2, out double[] dd2, alglib.xparams _params) { int n; int boundltype; double boundl; int boundrtype; double boundr; int n2; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dconvdiff2cubic': looks like one of arguments has wrong size"); y2 = new double[0]; d2 = new double[0]; dd2 = new double[0]; n = ap.len(x); boundltype = 0; boundl = 0; boundrtype = 0; boundr = 0; n2 = ap.len(x2); spline1d.spline1dconvdiff2cubic(x, y, n, boundltype, boundl, boundrtype, boundr, x2, n2, ref y2, ref d2, ref dd2, _params); return; } /************************************************************************* This subroutine builds Catmull-Rom spline interpolant. INPUT PARAMETERS: X - spline nodes, array[0..N-1]. Y - function values, array[0..N-1]. OPTIONAL PARAMETERS: N - points count: * N>=2 * if given, only first N points are used to build spline * if not given, automatically detected from X/Y sizes (len(X) must be equal to len(Y)) BoundType - boundary condition type: * -1 for periodic boundary condition * 0 for parabolically terminated spline (default) Tension - tension parameter: * tension=0 corresponds to classic Catmull-Rom spline (default) * 0<tension<1 corresponds to general cardinal spline OUTPUT PARAMETERS: C - spline interpolant ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array. -- ALGLIB PROJECT -- Copyright 23.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline1dbuildcatmullrom(double[] x, double[] y, int n, int boundtype, double tension, out spline1dinterpolant c) { c = new spline1dinterpolant(); spline1d.spline1dbuildcatmullrom(x, y, n, boundtype, tension, c.innerobj, null); } public static void spline1dbuildcatmullrom(double[] x, double[] y, int n, int boundtype, double tension, out spline1dinterpolant c, alglib.xparams _params) { c = new spline1dinterpolant(); spline1d.spline1dbuildcatmullrom(x, y, n, boundtype, tension, c.innerobj, _params); } public static void spline1dbuildcatmullrom(double[] x, double[] y, out spline1dinterpolant c) { int n; int boundtype; double tension; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dbuildcatmullrom': looks like one of arguments has wrong size"); c = new spline1dinterpolant(); n = ap.len(x); boundtype = 0; tension = 0; spline1d.spline1dbuildcatmullrom(x, y, n, boundtype, tension, c.innerobj, null); return; } public static void spline1dbuildcatmullrom(double[] x, double[] y, out spline1dinterpolant c, alglib.xparams _params) { int n; int boundtype; double tension; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dbuildcatmullrom': looks like one of arguments has wrong size"); c = new spline1dinterpolant(); n = ap.len(x); boundtype = 0; tension = 0; spline1d.spline1dbuildcatmullrom(x, y, n, boundtype, tension, c.innerobj, _params); return; } /************************************************************************* This subroutine builds Hermite spline interpolant. INPUT PARAMETERS: X - spline nodes, array[0..N-1] Y - function values, array[0..N-1] D - derivatives, array[0..N-1] N - points count (optional): * N>=2 * if given, only first N points are used to build spline * if not given, automatically detected from X/Y sizes (len(X) must be equal to len(Y)) OUTPUT PARAMETERS: C - spline interpolant. ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array.
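USAGE SKETCH: building a Hermite spline and evaluating it with Spline1DCalc(), which is documented further below (data values are illustrative only):

    double[] x = new double[]{0.0, 1.0, 2.0};
    double[] y = new double[]{0.0, 1.0, 4.0};     // function values
    double[] d = new double[]{0.0, 2.0, 4.0};     // derivatives supplied by the caller
    alglib.spline1dinterpolant c;
    alglib.spline1dbuildhermite(x, y, d, out c);
    double v = alglib.spline1dcalc(c, 1.5);       // S(1.5); S(x[i])=y[i] and S'(x[i])=d[i] by construction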
-- ALGLIB PROJECT -- Copyright 23.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline1dbuildhermite(double[] x, double[] y, double[] d, int n, out spline1dinterpolant c) { c = new spline1dinterpolant(); spline1d.spline1dbuildhermite(x, y, d, n, c.innerobj, null); } public static void spline1dbuildhermite(double[] x, double[] y, double[] d, int n, out spline1dinterpolant c, alglib.xparams _params) { c = new spline1dinterpolant(); spline1d.spline1dbuildhermite(x, y, d, n, c.innerobj, _params); } public static void spline1dbuildhermite(double[] x, double[] y, double[] d, out spline1dinterpolant c) { int n; if( (ap.len(x)!=ap.len(y)) || (ap.len(x)!=ap.len(d))) throw new alglibexception("Error while calling 'spline1dbuildhermite': looks like one of arguments has wrong size"); c = new spline1dinterpolant(); n = ap.len(x); spline1d.spline1dbuildhermite(x, y, d, n, c.innerobj, null); return; } public static void spline1dbuildhermite(double[] x, double[] y, double[] d, out spline1dinterpolant c, alglib.xparams _params) { int n; if( (ap.len(x)!=ap.len(y)) || (ap.len(x)!=ap.len(d))) throw new alglibexception("Error while calling 'spline1dbuildhermite': looks like one of arguments has wrong size"); c = new spline1dinterpolant(); n = ap.len(x); spline1d.spline1dbuildhermite(x, y, d, n, c.innerobj, _params); return; } /************************************************************************* This subroutine builds Akima spline interpolant INPUT PARAMETERS: X - spline nodes, array[0..N-1] Y - function values, array[0..N-1] N - points count (optional): * N>=2 * if given, only first N points are used to build spline * if not given, automatically detected from X/Y sizes (len(X) must be equal to len(Y)) OUTPUT PARAMETERS: C - spline interpolant ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array. -- ALGLIB PROJECT -- Copyright 24.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline1dbuildakima(double[] x, double[] y, int n, out spline1dinterpolant c) { c = new spline1dinterpolant(); spline1d.spline1dbuildakima(x, y, n, c.innerobj, null); } public static void spline1dbuildakima(double[] x, double[] y, int n, out spline1dinterpolant c, alglib.xparams _params) { c = new spline1dinterpolant(); spline1d.spline1dbuildakima(x, y, n, c.innerobj, _params); } public static void spline1dbuildakima(double[] x, double[] y, out spline1dinterpolant c) { int n; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dbuildakima': looks like one of arguments has wrong size"); c = new spline1dinterpolant(); n = ap.len(x); spline1d.spline1dbuildakima(x, y, n, c.innerobj, null); return; } public static void spline1dbuildakima(double[] x, double[] y, out spline1dinterpolant c, alglib.xparams _params) { int n; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dbuildakima': looks like one of arguments has wrong size"); c = new spline1dinterpolant(); n = ap.len(x); spline1d.spline1dbuildakima(x, y, n, c.innerobj, _params); return; } /************************************************************************* This subroutine calculates the value of the spline at the given point X. 
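For example, a minimal sketch which obtains C from Spline1DBuildAkima() declared above (data values are illustrative only):

    double[] x = new double[]{0.0, 1.0, 2.0, 3.0, 4.0};
    double[] y = new double[]{0.0, 1.0, 4.0, 9.0, 16.0};
    alglib.spline1dinterpolant c;
    alglib.spline1dbuildakima(x, y, out c);
    double v = alglib.spline1dcalc(c, 2.5);   // value of the Akima spline at X=2.5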
INPUT PARAMETERS: C - spline interpolant X - point Result: S(x) -- ALGLIB PROJECT -- Copyright 23.06.2007 by Bochkanov Sergey *************************************************************************/ public static double spline1dcalc(spline1dinterpolant c, double x) { return spline1d.spline1dcalc(c.innerobj, x, null); } public static double spline1dcalc(spline1dinterpolant c, double x, alglib.xparams _params) { return spline1d.spline1dcalc(c.innerobj, x, _params); } /************************************************************************* This subroutine differentiates the spline. INPUT PARAMETERS: C - spline interpolant. X - point Result: S - S(x) DS - S'(x) D2S - S''(x) -- ALGLIB PROJECT -- Copyright 24.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline1ddiff(spline1dinterpolant c, double x, out double s, out double ds, out double d2s) { s = 0; ds = 0; d2s = 0; spline1d.spline1ddiff(c.innerobj, x, ref s, ref ds, ref d2s, null); } public static void spline1ddiff(spline1dinterpolant c, double x, out double s, out double ds, out double d2s, alglib.xparams _params) { s = 0; ds = 0; d2s = 0; spline1d.spline1ddiff(c.innerobj, x, ref s, ref ds, ref d2s, _params); } /************************************************************************* This subroutine unpacks the spline into the coefficients table. INPUT PARAMETERS: C - spline interpolant. X - point OUTPUT PARAMETERS: Tbl - coefficients table, unpacked format, array[0..N-2, 0..5]. For I = 0...N-2: Tbl[I,0] = X[i] Tbl[I,1] = X[i+1] Tbl[I,2] = C0 Tbl[I,3] = C1 Tbl[I,4] = C2 Tbl[I,5] = C3 On [x[i], x[i+1]] spline is equals to: S(x) = C0 + C1*t + C2*t^2 + C3*t^3 t = x-x[i] NOTE: You can rebuild spline with Spline1DBuildHermite() function, which accepts as inputs function values and derivatives at nodes, which are easy to calculate when you have coefficients. -- ALGLIB PROJECT -- Copyright 29.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline1dunpack(spline1dinterpolant c, out int n, out double[,] tbl) { n = 0; tbl = new double[0,0]; spline1d.spline1dunpack(c.innerobj, ref n, ref tbl, null); } public static void spline1dunpack(spline1dinterpolant c, out int n, out double[,] tbl, alglib.xparams _params) { n = 0; tbl = new double[0,0]; spline1d.spline1dunpack(c.innerobj, ref n, ref tbl, _params); } /************************************************************************* This subroutine performs linear transformation of the spline argument. INPUT PARAMETERS: C - spline interpolant. A, B- transformation coefficients: x = A*t + B Result: C - transformed spline -- ALGLIB PROJECT -- Copyright 30.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline1dlintransx(spline1dinterpolant c, double a, double b) { spline1d.spline1dlintransx(c.innerobj, a, b, null); } public static void spline1dlintransx(spline1dinterpolant c, double a, double b, alglib.xparams _params) { spline1d.spline1dlintransx(c.innerobj, a, b, _params); } /************************************************************************* This subroutine performs linear transformation of the spline. INPUT PARAMETERS: C - spline interpolant. 
A, B- transformation coefficients: S2(x) = A*S(x) + B Result: C - transformed spline -- ALGLIB PROJECT -- Copyright 30.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline1dlintransy(spline1dinterpolant c, double a, double b) { spline1d.spline1dlintransy(c.innerobj, a, b, null); } public static void spline1dlintransy(spline1dinterpolant c, double a, double b, alglib.xparams _params) { spline1d.spline1dlintransy(c.innerobj, a, b, _params); } /************************************************************************* This subroutine integrates the spline. INPUT PARAMETERS: C - spline interpolant. X - right bound of the integration interval [a, x], here 'a' denotes min(x[]) Result: integral(S(t)dt,a,x) -- ALGLIB PROJECT -- Copyright 23.06.2007 by Bochkanov Sergey *************************************************************************/ public static double spline1dintegrate(spline1dinterpolant c, double x) { return spline1d.spline1dintegrate(c.innerobj, x, null); } public static double spline1dintegrate(spline1dinterpolant c, double x, alglib.xparams _params) { return spline1d.spline1dintegrate(c.innerobj, x, _params); } /************************************************************************* Fitting by penalized cubic spline. Equidistant grid with M nodes on [min(x,xc),max(x,xc)] is used to build basis functions. Basis functions are cubic splines with natural boundary conditions. Problem is regularized by adding non-linearity penalty to the usual least squares penalty function: S(x) = arg min { LS + P }, where LS = SUM { w[i]^2*(y[i] - S(x[i]))^2 } - least squares penalty P = C*10^rho*integral{ S''(x)^2*dx } - non-linearity penalty rho - tunable constant given by user C - automatically determined scale parameter, makes penalty invariant with respect to scaling of X, Y, W. ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: X - points, array[0..N-1]. Y - function values, array[0..N-1]. N - number of points (optional): * N>0 * if given, only first N elements of X/Y are processed * if not given, automatically determined from X/Y sizes M - number of basis functions ( = number_of_nodes), M>=4. Rho - regularization constant passed by user. It penalizes nonlinearity in the regression spline. It is logarithmically scaled, i.e. actual value of regularization constant is calculated as 10^Rho. It is automatically scaled so that: * Rho=2.0 corresponds to moderate amount of nonlinearity * generally, it should be somewhere in the [-8.0,+8.0] If you do not want to penalize nonlineary, pass small Rho. Values as low as -15 should work. OUTPUT PARAMETERS: Info- same format as in LSFitLinearWC() subroutine. * Info>0 task is solved * Info<=0 an error occured: -4 means inconvergence of internal SVD or Cholesky decomposition; problem may be too ill-conditioned (very rare) S - spline interpolant. Rep - Following fields are set: * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). 
* AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED IMPORTANT: this subroitine doesn't calculate task's condition number for K<>0. NOTE 1: additional nodes are added to the spline outside of the fitting interval to force linearity when xmax(x,xc). It is done for consistency - we penalize non-linearity at [min(x,xc),max(x,xc)], so it is natural to force linearity outside of this interval. NOTE 2: function automatically sorts points, so caller may pass unsorted array. -- ALGLIB PROJECT -- Copyright 18.08.2009 by Bochkanov Sergey *************************************************************************/ public static void spline1dfitpenalized(double[] x, double[] y, int n, int m, double rho, out int info, out spline1dinterpolant s, out spline1dfitreport rep) { info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); spline1d.spline1dfitpenalized(x, y, n, m, rho, ref info, s.innerobj, rep.innerobj, null); } public static void spline1dfitpenalized(double[] x, double[] y, int n, int m, double rho, out int info, out spline1dinterpolant s, out spline1dfitreport rep, alglib.xparams _params) { info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); spline1d.spline1dfitpenalized(x, y, n, m, rho, ref info, s.innerobj, rep.innerobj, _params); } public static void spline1dfitpenalized(double[] x, double[] y, int m, double rho, out int info, out spline1dinterpolant s, out spline1dfitreport rep) { int n; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dfitpenalized': looks like one of arguments has wrong size"); info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); n = ap.len(x); spline1d.spline1dfitpenalized(x, y, n, m, rho, ref info, s.innerobj, rep.innerobj, null); return; } public static void spline1dfitpenalized(double[] x, double[] y, int m, double rho, out int info, out spline1dinterpolant s, out spline1dfitreport rep, alglib.xparams _params) { int n; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dfitpenalized': looks like one of arguments has wrong size"); info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); n = ap.len(x); spline1d.spline1dfitpenalized(x, y, n, m, rho, ref info, s.innerobj, rep.innerobj, _params); return; } /************************************************************************* Weighted fitting by penalized cubic spline. Equidistant grid with M nodes on [min(x,xc),max(x,xc)] is used to build basis functions. Basis functions are cubic splines with natural boundary conditions. Problem is regularized by adding non-linearity penalty to the usual least squares penalty function: S(x) = arg min { LS + P }, where LS = SUM { w[i]^2*(y[i] - S(x[i]))^2 } - least squares penalty P = C*10^rho*integral{ S''(x)^2*dx } - non-linearity penalty rho - tunable constant given by user C - automatically determined scale parameter, makes penalty invariant with respect to scaling of X, Y, W. ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! 
related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: X - points, array[0..N-1]. Y - function values, array[0..N-1]. W - weights, array[0..N-1]. Each summand in the sum of squared approximation deviations from the given values is multiplied by the square of the corresponding weight. Fill it with 1's if you don't want to solve a weighted problem. N - number of points (optional): * N>0 * if given, only first N elements of X/Y/W are processed * if not given, automatically determined from X/Y/W sizes M - number of basis functions ( = number_of_nodes), M>=4. Rho - regularization constant passed by user. It penalizes nonlinearity in the regression spline. It is logarithmically scaled, i.e. actual value of regularization constant is calculated as 10^Rho. It is automatically scaled so that: * Rho=2.0 corresponds to moderate amount of nonlinearity * generally, it should be somewhere in the [-8.0,+8.0] range If you do not want to penalize nonlinearity, pass small Rho. Values as low as -15 should work. OUTPUT PARAMETERS: Info- same format as in LSFitLinearWC() subroutine. * Info>0 task is solved * Info<=0 an error occurred: -4 means non-convergence of internal SVD or Cholesky decomposition; problem may be too ill-conditioned (very rare) S - spline interpolant. Rep - Following fields are set: * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED IMPORTANT: this subroutine doesn't calculate task's condition number for K<>0. NOTE 1: additional nodes are added to the spline outside of the fitting interval to force linearity when x<min(x,xc) or x>max(x,xc). It is done for consistency - we penalize non-linearity at [min(x,xc),max(x,xc)], so it is natural to force linearity outside of this interval. NOTE 2: function automatically sorts points, so caller may pass unsorted array.
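USAGE SKETCH: a minimal weighted penalized fit (the data, M and Rho below are illustrative starting values, not recommendations):

    double[] x = new double[]{0.0, 0.2, 0.4, 0.6, 0.8, 1.0};
    double[] y = new double[]{0.05, 0.42, 0.78, 1.02, 0.98, 1.21};   // noisy samples
    double[] w = new double[]{1.0, 1.0, 1.0, 1.0, 1.0, 1.0};         // equal weights
    int info;
    alglib.spline1dinterpolant s;
    alglib.spline1dfitreport rep;
    alglib.spline1dfitpenalizedw(x, y, w, 4, 2.0, out info, out s, out rep);   // M=4 nodes, Rho=2.0
    // info>0 on success; rep.rmserror/avgerror/maxerror describe the fit quality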
-- ALGLIB PROJECT -- Copyright 19.10.2010 by Bochkanov Sergey *************************************************************************/ public static void spline1dfitpenalizedw(double[] x, double[] y, double[] w, int n, int m, double rho, out int info, out spline1dinterpolant s, out spline1dfitreport rep) { info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); spline1d.spline1dfitpenalizedw(x, y, w, n, m, rho, ref info, s.innerobj, rep.innerobj, null); } public static void spline1dfitpenalizedw(double[] x, double[] y, double[] w, int n, int m, double rho, out int info, out spline1dinterpolant s, out spline1dfitreport rep, alglib.xparams _params) { info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); spline1d.spline1dfitpenalizedw(x, y, w, n, m, rho, ref info, s.innerobj, rep.innerobj, _params); } public static void spline1dfitpenalizedw(double[] x, double[] y, double[] w, int m, double rho, out int info, out spline1dinterpolant s, out spline1dfitreport rep) { int n; if( (ap.len(x)!=ap.len(y)) || (ap.len(x)!=ap.len(w))) throw new alglibexception("Error while calling 'spline1dfitpenalizedw': looks like one of arguments has wrong size"); info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); n = ap.len(x); spline1d.spline1dfitpenalizedw(x, y, w, n, m, rho, ref info, s.innerobj, rep.innerobj, null); return; } public static void spline1dfitpenalizedw(double[] x, double[] y, double[] w, int m, double rho, out int info, out spline1dinterpolant s, out spline1dfitreport rep, alglib.xparams _params) { int n; if( (ap.len(x)!=ap.len(y)) || (ap.len(x)!=ap.len(w))) throw new alglibexception("Error while calling 'spline1dfitpenalizedw': looks like one of arguments has wrong size"); info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); n = ap.len(x); spline1d.spline1dfitpenalizedw(x, y, w, n, m, rho, ref info, s.innerobj, rep.innerobj, _params); return; } /************************************************************************* This function builds monotone cubic Hermite interpolant. This interpolant is monotonic in [x(0),x(n-1)] and is constant outside of this interval. In case y[] form non-monotonic sequence, interpolant is piecewise monotonic. Say, for x=(0,1,2,3,4) and y=(0,1,2,1,0) interpolant will monotonically grow at [0..2] and monotonically decrease at [2..4]. INPUT PARAMETERS: X - spline nodes, array[0..N-1]. Subroutine automatically sorts points, so caller may pass unsorted array. Y - function values, array[0..N-1] N - the number of points(N>=2). OUTPUT PARAMETERS: C - spline interpolant. 
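USAGE SKETCH: the data from the description above (non-monotonic y, so the interpolant is piecewise monotonic):

    double[] x = new double[]{0.0, 1.0, 2.0, 3.0, 4.0};
    double[] y = new double[]{0.0, 1.0, 2.0, 1.0, 0.0};
    alglib.spline1dinterpolant c;
    alglib.spline1dbuildmonotone(x, y, out c);
    // the spline grows on [0,2], decreases on [2,4] and is constant outside [0,4]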
-- ALGLIB PROJECT -- Copyright 21.06.2012 by Bochkanov Sergey *************************************************************************/ public static void spline1dbuildmonotone(double[] x, double[] y, int n, out spline1dinterpolant c) { c = new spline1dinterpolant(); spline1d.spline1dbuildmonotone(x, y, n, c.innerobj, null); } public static void spline1dbuildmonotone(double[] x, double[] y, int n, out spline1dinterpolant c, alglib.xparams _params) { c = new spline1dinterpolant(); spline1d.spline1dbuildmonotone(x, y, n, c.innerobj, _params); } public static void spline1dbuildmonotone(double[] x, double[] y, out spline1dinterpolant c) { int n; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dbuildmonotone': looks like one of arguments has wrong size"); c = new spline1dinterpolant(); n = ap.len(x); spline1d.spline1dbuildmonotone(x, y, n, c.innerobj, null); return; } public static void spline1dbuildmonotone(double[] x, double[] y, out spline1dinterpolant c, alglib.xparams _params) { int n; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dbuildmonotone': looks like one of arguments has wrong size"); c = new spline1dinterpolant(); n = ap.len(x); spline1d.spline1dbuildmonotone(x, y, n, c.innerobj, _params); return; } } public partial class alglib { /************************************************************************* Parametric spline inteprolant: 2-dimensional curve. You should not try to access its members directly - use PSpline2XXXXXXXX() functions instead. *************************************************************************/ public class pspline2interpolant : alglibobject { // // Public declarations // public pspline2interpolant() { _innerobj = new parametric.pspline2interpolant(); } public override alglib.alglibobject make_copy() { return new pspline2interpolant((parametric.pspline2interpolant)_innerobj.make_copy()); } // // Although some of declarations below are public, you should not use them // They are intended for internal use only // private parametric.pspline2interpolant _innerobj; public parametric.pspline2interpolant innerobj { get { return _innerobj; } } public pspline2interpolant(parametric.pspline2interpolant obj) { _innerobj = obj; } } /************************************************************************* Parametric spline inteprolant: 3-dimensional curve. You should not try to access its members directly - use PSpline3XXXXXXXX() functions instead. *************************************************************************/ public class pspline3interpolant : alglibobject { // // Public declarations // public pspline3interpolant() { _innerobj = new parametric.pspline3interpolant(); } public override alglib.alglibobject make_copy() { return new pspline3interpolant((parametric.pspline3interpolant)_innerobj.make_copy()); } // // Although some of declarations below are public, you should not use them // They are intended for internal use only // private parametric.pspline3interpolant _innerobj; public parametric.pspline3interpolant innerobj { get { return _innerobj; } } public pspline3interpolant(parametric.pspline3interpolant obj) { _innerobj = obj; } } /************************************************************************* This function builds non-periodic 2-dimensional parametric spline which starts at (X[0],Y[0]) and ends at (X[N-1],Y[N-1]). INPUT PARAMETERS: XY - points, array[0..N-1,0..1]. XY[I,0:1] corresponds to the Ith point. Order of points is important! 
N - points count, N>=5 for Akima splines, N>=2 for other types of splines. ST - spline type: * 0 Akima spline * 1 parabolically terminated Catmull-Rom spline (Tension=0) * 2 parabolically terminated cubic spline PT - parameterization type: * 0 uniform * 1 chord length * 2 centripetal OUTPUT PARAMETERS: P - parametric spline interpolant NOTES: * this function assumes that there all consequent points are distinct. I.e. (x0,y0)<>(x1,y1), (x1,y1)<>(x2,y2), (x2,y2)<>(x3,y3) and so on. However, non-consequent points may coincide, i.e. we can have (x0,y0)= =(x2,y2). -- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline2build(double[,] xy, int n, int st, int pt, out pspline2interpolant p) { p = new pspline2interpolant(); parametric.pspline2build(xy, n, st, pt, p.innerobj, null); } public static void pspline2build(double[,] xy, int n, int st, int pt, out pspline2interpolant p, alglib.xparams _params) { p = new pspline2interpolant(); parametric.pspline2build(xy, n, st, pt, p.innerobj, _params); } /************************************************************************* This function builds non-periodic 3-dimensional parametric spline which starts at (X[0],Y[0],Z[0]) and ends at (X[N-1],Y[N-1],Z[N-1]). Same as PSpline2Build() function, but for 3D, so we won't duplicate its description here. -- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline3build(double[,] xy, int n, int st, int pt, out pspline3interpolant p) { p = new pspline3interpolant(); parametric.pspline3build(xy, n, st, pt, p.innerobj, null); } public static void pspline3build(double[,] xy, int n, int st, int pt, out pspline3interpolant p, alglib.xparams _params) { p = new pspline3interpolant(); parametric.pspline3build(xy, n, st, pt, p.innerobj, _params); } /************************************************************************* This function builds periodic 2-dimensional parametric spline which starts at (X[0],Y[0]), goes through all points to (X[N-1],Y[N-1]) and then back to (X[0],Y[0]). INPUT PARAMETERS: XY - points, array[0..N-1,0..1]. XY[I,0:1] corresponds to the Ith point. XY[N-1,0:1] must be different from XY[0,0:1]. Order of points is important! N - points count, N>=3 for other types of splines. ST - spline type: * 1 Catmull-Rom spline (Tension=0) with cyclic boundary conditions * 2 cubic spline with cyclic boundary conditions PT - parameterization type: * 0 uniform * 1 chord length * 2 centripetal OUTPUT PARAMETERS: P - parametric spline interpolant NOTES: * this function assumes that there all consequent points are distinct. I.e. (x0,y0)<>(x1,y1), (x1,y1)<>(x2,y2), (x2,y2)<>(x3,y3) and so on. However, non-consequent points may coincide, i.e. we can have (x0,y0)= =(x2,y2). * last point of sequence is NOT equal to the first point. You shouldn't make curve "explicitly periodic" by making them equal. 
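USAGE SKETCH: a closed curve through four illustrative points (ST=1 selects the cyclic Catmull-Rom spline, PT=1 selects chord length parameterization):

    double[,] xy = new double[,]{{0.0,0.0},{1.0,0.0},{1.0,1.0},{0.0,1.0}};   // last point differs from the first
    alglib.pspline2interpolant p;
    alglib.pspline2buildperiodic(xy, 4, 1, 1, out p);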
-- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline2buildperiodic(double[,] xy, int n, int st, int pt, out pspline2interpolant p) { p = new pspline2interpolant(); parametric.pspline2buildperiodic(xy, n, st, pt, p.innerobj, null); } public static void pspline2buildperiodic(double[,] xy, int n, int st, int pt, out pspline2interpolant p, alglib.xparams _params) { p = new pspline2interpolant(); parametric.pspline2buildperiodic(xy, n, st, pt, p.innerobj, _params); } /************************************************************************* This function builds periodic 3-dimensional parametric spline which starts at (X[0],Y[0],Z[0]), goes through all points to (X[N-1],Y[N-1],Z[N-1]) and then back to (X[0],Y[0],Z[0]). Same as PSpline2Build() function, but for 3D, so we won't duplicate its description here. -- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline3buildperiodic(double[,] xy, int n, int st, int pt, out pspline3interpolant p) { p = new pspline3interpolant(); parametric.pspline3buildperiodic(xy, n, st, pt, p.innerobj, null); } public static void pspline3buildperiodic(double[,] xy, int n, int st, int pt, out pspline3interpolant p, alglib.xparams _params) { p = new pspline3interpolant(); parametric.pspline3buildperiodic(xy, n, st, pt, p.innerobj, _params); } /************************************************************************* This function returns vector of parameter values correspoding to points. I.e. for P created from (X[0],Y[0])...(X[N-1],Y[N-1]) and U=TValues(P) we have (X[0],Y[0]) = PSpline2Calc(P,U[0]), (X[1],Y[1]) = PSpline2Calc(P,U[1]), (X[2],Y[2]) = PSpline2Calc(P,U[2]), ... INPUT PARAMETERS: P - parametric spline interpolant OUTPUT PARAMETERS: N - array size T - array[0..N-1] NOTES: * for non-periodic splines U[0]=0, U[0]1) correspond to parts of the curve before the first (after the last) point * for periodic splines T<0 (or T>1) are projected into [0,1] by making T=T-floor(T). OUTPUT PARAMETERS: X - X-position Y - Y-position -- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline2calc(pspline2interpolant p, double t, out double x, out double y) { x = 0; y = 0; parametric.pspline2calc(p.innerobj, t, ref x, ref y, null); } public static void pspline2calc(pspline2interpolant p, double t, out double x, out double y, alglib.xparams _params) { x = 0; y = 0; parametric.pspline2calc(p.innerobj, t, ref x, ref y, _params); } /************************************************************************* This function calculates the value of the parametric spline for a given value of parameter T. INPUT PARAMETERS: P - parametric spline interpolant T - point: * T in [0,1] corresponds to interval spanned by points * for non-periodic splines T<0 (or T>1) correspond to parts of the curve before the first (after the last) point * for periodic splines T<0 (or T>1) are projected into [0,1] by making T=T-floor(T). 
OUTPUT PARAMETERS: X - X-position Y - Y-position Z - Z-position -- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline3calc(pspline3interpolant p, double t, out double x, out double y, out double z) { x = 0; y = 0; z = 0; parametric.pspline3calc(p.innerobj, t, ref x, ref y, ref z, null); } public static void pspline3calc(pspline3interpolant p, double t, out double x, out double y, out double z, alglib.xparams _params) { x = 0; y = 0; z = 0; parametric.pspline3calc(p.innerobj, t, ref x, ref y, ref z, _params); } /************************************************************************* This function calculates tangent vector for a given value of parameter T INPUT PARAMETERS: P - parametric spline interpolant T - point: * T in [0,1] corresponds to interval spanned by points * for non-periodic splines T<0 (or T>1) correspond to parts of the curve before the first (after the last) point * for periodic splines T<0 (or T>1) are projected into [0,1] by making T=T-floor(T). OUTPUT PARAMETERS: X - X-component of tangent vector (normalized) Y - Y-component of tangent vector (normalized) NOTE: X^2+Y^2 is either 1 (for non-zero tangent vector) or 0. -- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline2tangent(pspline2interpolant p, double t, out double x, out double y) { x = 0; y = 0; parametric.pspline2tangent(p.innerobj, t, ref x, ref y, null); } public static void pspline2tangent(pspline2interpolant p, double t, out double x, out double y, alglib.xparams _params) { x = 0; y = 0; parametric.pspline2tangent(p.innerobj, t, ref x, ref y, _params); } /************************************************************************* This function calculates tangent vector for a given value of parameter T INPUT PARAMETERS: P - parametric spline interpolant T - point: * T in [0,1] corresponds to interval spanned by points * for non-periodic splines T<0 (or T>1) correspond to parts of the curve before the first (after the last) point * for periodic splines T<0 (or T>1) are projected into [0,1] by making T=T-floor(T). OUTPUT PARAMETERS: X - X-component of tangent vector (normalized) Y - Y-component of tangent vector (normalized) Z - Z-component of tangent vector (normalized) NOTE: X^2+Y^2+Z^2 is either 1 (for non-zero tangent vector) or 0. -- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline3tangent(pspline3interpolant p, double t, out double x, out double y, out double z) { x = 0; y = 0; z = 0; parametric.pspline3tangent(p.innerobj, t, ref x, ref y, ref z, null); } public static void pspline3tangent(pspline3interpolant p, double t, out double x, out double y, out double z, alglib.xparams _params) { x = 0; y = 0; z = 0; parametric.pspline3tangent(p.innerobj, t, ref x, ref y, ref z, _params); } /************************************************************************* This function calculates derivative, i.e. it returns (dX/dT,dY/dT). INPUT PARAMETERS: P - parametric spline interpolant T - point: * T in [0,1] corresponds to interval spanned by points * for non-periodic splines T<0 (or T>1) correspond to parts of the curve before the first (after the last) point * for periodic splines T<0 (or T>1) are projected into [0,1] by making T=T-floor(T). 
OUTPUT PARAMETERS: X - X-value DX - X-derivative Y - Y-value DY - Y-derivative -- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline2diff(pspline2interpolant p, double t, out double x, out double dx, out double y, out double dy) { x = 0; dx = 0; y = 0; dy = 0; parametric.pspline2diff(p.innerobj, t, ref x, ref dx, ref y, ref dy, null); } public static void pspline2diff(pspline2interpolant p, double t, out double x, out double dx, out double y, out double dy, alglib.xparams _params) { x = 0; dx = 0; y = 0; dy = 0; parametric.pspline2diff(p.innerobj, t, ref x, ref dx, ref y, ref dy, _params); } /************************************************************************* This function calculates derivative, i.e. it returns (dX/dT,dY/dT,dZ/dT). INPUT PARAMETERS: P - parametric spline interpolant T - point: * T in [0,1] corresponds to interval spanned by points * for non-periodic splines T<0 (or T>1) correspond to parts of the curve before the first (after the last) point * for periodic splines T<0 (or T>1) are projected into [0,1] by making T=T-floor(T). OUTPUT PARAMETERS: X - X-value DX - X-derivative Y - Y-value DY - Y-derivative Z - Z-value DZ - Z-derivative -- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline3diff(pspline3interpolant p, double t, out double x, out double dx, out double y, out double dy, out double z, out double dz) { x = 0; dx = 0; y = 0; dy = 0; z = 0; dz = 0; parametric.pspline3diff(p.innerobj, t, ref x, ref dx, ref y, ref dy, ref z, ref dz, null); } public static void pspline3diff(pspline3interpolant p, double t, out double x, out double dx, out double y, out double dy, out double z, out double dz, alglib.xparams _params) { x = 0; dx = 0; y = 0; dy = 0; z = 0; dz = 0; parametric.pspline3diff(p.innerobj, t, ref x, ref dx, ref y, ref dy, ref z, ref dz, _params); } /************************************************************************* This function calculates first and second derivative with respect to T. INPUT PARAMETERS: P - parametric spline interpolant T - point: * T in [0,1] corresponds to interval spanned by points * for non-periodic splines T<0 (or T>1) correspond to parts of the curve before the first (after the last) point * for periodic splines T<0 (or T>1) are projected into [0,1] by making T=T-floor(T). OUTPUT PARAMETERS: X - X-value DX - derivative D2X - second derivative Y - Y-value DY - derivative D2Y - second derivative -- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline2diff2(pspline2interpolant p, double t, out double x, out double dx, out double d2x, out double y, out double dy, out double d2y) { x = 0; dx = 0; d2x = 0; y = 0; dy = 0; d2y = 0; parametric.pspline2diff2(p.innerobj, t, ref x, ref dx, ref d2x, ref y, ref dy, ref d2y, null); } public static void pspline2diff2(pspline2interpolant p, double t, out double x, out double dx, out double d2x, out double y, out double dy, out double d2y, alglib.xparams _params) { x = 0; dx = 0; d2x = 0; y = 0; dy = 0; d2y = 0; parametric.pspline2diff2(p.innerobj, t, ref x, ref dx, ref d2x, ref y, ref dy, ref d2y, _params); } /************************************************************************* This function calculates first and second derivative with respect to T. 
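It follows the same calling pattern as its 2-dimensional counterpart PSpline2Diff2() above; for reference, an illustrative 2D sketch (it assumes P was already built with PSpline2Build() or PSpline2BuildPeriodic()):

    double xv, yv, dx, dy, d2x, d2y;
    alglib.pspline2calc(p, 0.25, out xv, out yv);        // point on the curve at T=0.25
    alglib.pspline2tangent(p, 0.25, out dx, out dy);     // normalized tangent at T=0.25
    alglib.pspline2diff2(p, 0.25, out xv, out dx, out d2x, out yv, out dy, out d2y);
    // values, first and second derivatives of X(T) and Y(T) at T=0.25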
INPUT PARAMETERS: P - parametric spline interpolant T - point: * T in [0,1] corresponds to interval spanned by points * for non-periodic splines T<0 (or T>1) correspond to parts of the curve before the first (after the last) point * for periodic splines T<0 (or T>1) are projected into [0,1] by making T=T-floor(T). OUTPUT PARAMETERS: X - X-value DX - derivative D2X - second derivative Y - Y-value DY - derivative D2Y - second derivative Z - Z-value DZ - derivative D2Z - second derivative -- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline3diff2(pspline3interpolant p, double t, out double x, out double dx, out double d2x, out double y, out double dy, out double d2y, out double z, out double dz, out double d2z) { x = 0; dx = 0; d2x = 0; y = 0; dy = 0; d2y = 0; z = 0; dz = 0; d2z = 0; parametric.pspline3diff2(p.innerobj, t, ref x, ref dx, ref d2x, ref y, ref dy, ref d2y, ref z, ref dz, ref d2z, null); } public static void pspline3diff2(pspline3interpolant p, double t, out double x, out double dx, out double d2x, out double y, out double dy, out double d2y, out double z, out double dz, out double d2z, alglib.xparams _params) { x = 0; dx = 0; d2x = 0; y = 0; dy = 0; d2y = 0; z = 0; dz = 0; d2z = 0; parametric.pspline3diff2(p.innerobj, t, ref x, ref dx, ref d2x, ref y, ref dy, ref d2y, ref z, ref dz, ref d2z, _params); } /************************************************************************* This function calculates arc length, i.e. length of curve between t=a and t=b. INPUT PARAMETERS: P - parametric spline interpolant A,B - parameter values corresponding to arc ends: * B>A will result in positive length returned * B<A will result in negative length returned ... OldZCount - old Z-count, OldZCount>1 OldYCount - old Y-count, OldYCount>1 OldXCount - old X-count, OldXCount>1 NewZCount - new Z-count, NewZCount>1 NewYCount - new Y-count, NewYCount>1 NewXCount - new X-count, NewXCount>1 OUTPUT PARAMETERS: B - array[0..NewXCount*NewYCount*NewZCount-1], function values at the new grid: B[0] x=0,y=0,z=0 B[1] x=1,y=0,z=0 B[..] ... B[..] x=newxcount-1,y=0,z=0 B[..] x=0,y=1,z=0 B[..] ... ... -- ALGLIB routine -- 26.04.2012 Copyright by Bochkanov Sergey *************************************************************************/ public static void spline3dresampletrilinear(double[] a, int oldzcount, int oldycount, int oldxcount, int newzcount, int newycount, int newxcount, out double[] b) { b = new double[0]; spline3d.spline3dresampletrilinear(a, oldzcount, oldycount, oldxcount, newzcount, newycount, newxcount, ref b, null); } public static void spline3dresampletrilinear(double[] a, int oldzcount, int oldycount, int oldxcount, int newzcount, int newycount, int newxcount, out double[] b, alglib.xparams _params) { b = new double[0]; spline3d.spline3dresampletrilinear(a, oldzcount, oldycount, oldxcount, newzcount, newycount, newxcount, ref b, _params); } /************************************************************************* This subroutine builds trilinear vector-valued spline. INPUT PARAMETERS: X - spline abscissas, array[0..N-1] Y - spline ordinates, array[0..M-1] Z - spline applicates, array[0..L-1] F - function values, array[0..M*N*L*D-1]: * first D elements store D values at (X[0],Y[0],Z[0]) * next D elements store D values at (X[1],Y[0],Z[0]) * next D elements store D values at (X[2],Y[0],Z[0]) * ...
* next D elements store D values at (X[0],Y[1],Z[0]) * next D elements store D values at (X[1],Y[1],Z[0]) * next D elements store D values at (X[2],Y[1],Z[0]) * ... * next D elements store D values at (X[0],Y[0],Z[1]) * next D elements store D values at (X[1],Y[0],Z[1]) * next D elements store D values at (X[2],Y[0],Z[1]) * ... * general form - D function values at (X[I],Y[J],Z[K]) are stored at F[D*(N*(M*K+J)+I)...D*(N*(M*K+J)+I)+D-1]. M,N, L - grid size, M>=2, N>=2, L>=2 D - vector dimension, D>=1 OUTPUT PARAMETERS: C - spline interpolant -- ALGLIB PROJECT -- Copyright 26.04.2012 by Bochkanov Sergey *************************************************************************/ public static void spline3dbuildtrilinearv(double[] x, int n, double[] y, int m, double[] z, int l, double[] f, int d, out spline3dinterpolant c) { c = new spline3dinterpolant(); spline3d.spline3dbuildtrilinearv(x, n, y, m, z, l, f, d, c.innerobj, null); } public static void spline3dbuildtrilinearv(double[] x, int n, double[] y, int m, double[] z, int l, double[] f, int d, out spline3dinterpolant c, alglib.xparams _params) { c = new spline3dinterpolant(); spline3d.spline3dbuildtrilinearv(x, n, y, m, z, l, f, d, c.innerobj, _params); } /************************************************************************* This subroutine calculates trilinear or tricubic vector-valued spline at the given point (X,Y,Z). INPUT PARAMETERS: C - spline interpolant. X, Y, Z - point F - output buffer, possibly preallocated array. In case array size is large enough to store result, it is not reallocated. Array which is too short will be reallocated OUTPUT PARAMETERS: F - array[D] (or larger) which stores function values -- ALGLIB PROJECT -- Copyright 26.04.2012 by Bochkanov Sergey *************************************************************************/ public static void spline3dcalcvbuf(spline3dinterpolant c, double x, double y, double z, ref double[] f) { spline3d.spline3dcalcvbuf(c.innerobj, x, y, z, ref f, null); } public static void spline3dcalcvbuf(spline3dinterpolant c, double x, double y, double z, ref double[] f, alglib.xparams _params) { spline3d.spline3dcalcvbuf(c.innerobj, x, y, z, ref f, _params); } /************************************************************************* This subroutine calculates trilinear or tricubic vector-valued spline at the given point (X,Y,Z). INPUT PARAMETERS: C - spline interpolant. X, Y, Z - point OUTPUT PARAMETERS: F - array[D] which stores function values. F is out-parameter and it is reallocated after call to this function. In case you want to reuse previously allocated F, you may use Spline3DCalcVBuf(), which reallocates F only when it is too small. -- ALGLIB PROJECT -- Copyright 26.04.2012 by Bochkanov Sergey *************************************************************************/ public static void spline3dcalcv(spline3dinterpolant c, double x, double y, double z, out double[] f) { f = new double[0]; spline3d.spline3dcalcv(c.innerobj, x, y, z, ref f, null); } public static void spline3dcalcv(spline3dinterpolant c, double x, double y, double z, out double[] f, alglib.xparams _params) { f = new double[0]; spline3d.spline3dcalcv(c.innerobj, x, y, z, ref f, _params); } /************************************************************************* This subroutine unpacks tri-dimensional spline into the coefficients table INPUT PARAMETERS: C - spline interpolant. Result: N - grid size (X) M - grid size (Y) L - grid size (Z) D - number of components SType- spline type.
Currently, only one spline type is supported: trilinear spline, as indicated by SType=1. Tbl - spline coefficients: [0..(N-1)*(M-1)*(L-1)*D-1, 0..13]. For T=0..D-1 (component index), I = 0...N-2 (x index), J=0..M-2 (y index), K=0..L-2 (z index): Q := T + I*D + J*D*(N-1) + K*D*(N-1)*(M-1), Q-th row stores decomposition for T-th component of the vector-valued function Tbl[Q,0] = X[i] Tbl[Q,1] = X[i+1] Tbl[Q,2] = Y[j] Tbl[Q,3] = Y[j+1] Tbl[Q,4] = Z[k] Tbl[Q,5] = Z[k+1] Tbl[Q,6] = C000 Tbl[Q,7] = C100 Tbl[Q,8] = C010 Tbl[Q,9] = C110 Tbl[Q,10]= C001 Tbl[Q,11]= C101 Tbl[Q,12]= C011 Tbl[Q,13]= C111 On each grid cell the spline is equal to: S(x,y,z) = SUM(c[i,j,k]*(t^i)*(u^j)*(v^k), i=0..1, j=0..1, k=0..1) t = x-x[i] u = y-y[j] v = z-z[k] NOTE: format of Tbl is given for SType=1. Future versions of ALGLIB can use different formats for different values of SType. -- ALGLIB PROJECT -- Copyright 26.04.2012 by Bochkanov Sergey *************************************************************************/ public static void spline3dunpackv(spline3dinterpolant c, out int n, out int m, out int l, out int d, out int stype, out double[,] tbl) { n = 0; m = 0; l = 0; d = 0; stype = 0; tbl = new double[0,0]; spline3d.spline3dunpackv(c.innerobj, ref n, ref m, ref l, ref d, ref stype, ref tbl, null); } public static void spline3dunpackv(spline3dinterpolant c, out int n, out int m, out int l, out int d, out int stype, out double[,] tbl, alglib.xparams _params) { n = 0; m = 0; l = 0; d = 0; stype = 0; tbl = new double[0,0]; spline3d.spline3dunpackv(c.innerobj, ref n, ref m, ref l, ref d, ref stype, ref tbl, _params); } } public partial class alglib { /************************************************************************* Conversion from barycentric representation to Chebyshev basis. This function has O(N^2) complexity. INPUT PARAMETERS: P - polynomial in barycentric form A,B - base interval for Chebyshev polynomials (see below) A<>B OUTPUT PARAMETERS T - coefficients of Chebyshev representation; P(x) = sum { T[i]*Ti(2*(x-A)/(B-A)-1), i=0..N-1 }, where Ti - I-th Chebyshev polynomial. NOTES: barycentric interpolant passed as P may be either polynomial obtained from polynomial interpolation/ fitting or rational function which is NOT polynomial. We can't distinguish between these two cases, and this algorithm just tries to work assuming that P IS a polynomial. If not, algorithm will return results, but they won't have any meaning. -- ALGLIB -- Copyright 30.09.2010 by Bochkanov Sergey *************************************************************************/ public static void polynomialbar2cheb(barycentricinterpolant p, double a, double b, out double[] t) { t = new double[0]; polint.polynomialbar2cheb(p.innerobj, a, b, ref t, null); } public static void polynomialbar2cheb(barycentricinterpolant p, double a, double b, out double[] t, alglib.xparams _params) { t = new double[0]; polint.polynomialbar2cheb(p.innerobj, a, b, ref t, _params); } /************************************************************************* Conversion from Chebyshev basis to barycentric representation. This function has O(N^2) complexity. INPUT PARAMETERS: T - coefficients of Chebyshev representation; P(x) = sum { T[i]*Ti(2*(x-A)/(B-A)-1), i=0..N }, where Ti - I-th Chebyshev polynomial. N - number of coefficients: * if given, only leading N elements of T are used * if not given, automatically determined from size of T A,B - base interval for Chebyshev polynomials (see above) A<>B ... C - offset (see below); 0.0 is used as default value. S - scale (see below); 1.0 is used as default value. S<>0.
OUTPUT PARAMETERS A - coefficients, P(x) = sum { A[i]*((X-C)/S)^i, i=0..N-1 } N - number of coefficients (polynomial degree plus 1) NOTES: 1. this function accepts offset and scale, which can be set to improve numerical properties of polynomial. For example, if P was obtained as result of interpolation on [-1,+1], you can set C=0 and S=1 and represent P as sum of 1, x, x^2, x^3 and so on. In most cases you it is exactly what you need. However, if your interpolation model was built on [999,1001], you will see significant growth of numerical errors when using {1, x, x^2, x^3} as basis. Representing P as sum of 1, (x-1000), (x-1000)^2, (x-1000)^3 will be better option. Such representation can be obtained by using 1000.0 as offset C and 1.0 as scale S. 2. power basis is ill-conditioned and tricks described above can't solve this problem completely. This function will return coefficients in any case, but for N>8 they will become unreliable. However, N's less than 5 are pretty safe. 3. barycentric interpolant passed as P may be either polynomial obtained from polynomial interpolation/ fitting or rational function which is NOT polynomial. We can't distinguish between these two cases, and this algorithm just tries to work assuming that P IS a polynomial. If not, algorithm will return results, but they won't have any meaning. -- ALGLIB -- Copyright 30.09.2010 by Bochkanov Sergey *************************************************************************/ public static void polynomialbar2pow(barycentricinterpolant p, double c, double s, out double[] a) { a = new double[0]; polint.polynomialbar2pow(p.innerobj, c, s, ref a, null); } public static void polynomialbar2pow(barycentricinterpolant p, double c, double s, out double[] a, alglib.xparams _params) { a = new double[0]; polint.polynomialbar2pow(p.innerobj, c, s, ref a, _params); } public static void polynomialbar2pow(barycentricinterpolant p, out double[] a) { double c; double s; a = new double[0]; c = 0; s = 1; polint.polynomialbar2pow(p.innerobj, c, s, ref a, null); return; } public static void polynomialbar2pow(barycentricinterpolant p, out double[] a, alglib.xparams _params) { double c; double s; a = new double[0]; c = 0; s = 1; polint.polynomialbar2pow(p.innerobj, c, s, ref a, _params); return; } /************************************************************************* Conversion from power basis to barycentric representation. This function has O(N^2) complexity. INPUT PARAMETERS: A - coefficients, P(x) = sum { A[i]*((X-C)/S)^i, i=0..N-1 } N - number of coefficients (polynomial degree plus 1) * if given, only leading N elements of A are used * if not given, automatically determined from size of A C - offset (see below); 0.0 is used as default value. S - scale (see below); 1.0 is used as default value. S<>0. OUTPUT PARAMETERS P - polynomial in barycentric form NOTES: 1. this function accepts offset and scale, which can be set to improve numerical properties of polynomial. For example, if you interpolate on [-1,+1], you can set C=0 and S=1 and convert from sum of 1, x, x^2, x^3 and so on. In most cases you it is exactly what you need. However, if your interpolation model was built on [999,1001], you will see significant growth of numerical errors when using {1, x, x^2, x^3} as input basis. Converting from sum of 1, (x-1000), (x-1000)^2, (x-1000)^3 will be better option (you have to specify 1000.0 as offset C and 1.0 as scale S). 2. power basis is ill-conditioned and tricks described above can't solve this problem completely. 
This function will return barycentric model in any case, but for N>8 accuracy will degrade. However, N's less than 5 are pretty safe. -- ALGLIB -- Copyright 30.09.2010 by Bochkanov Sergey *************************************************************************/ public static void polynomialpow2bar(double[] a, int n, double c, double s, out barycentricinterpolant p) { p = new barycentricinterpolant(); polint.polynomialpow2bar(a, n, c, s, p.innerobj, null); } public static void polynomialpow2bar(double[] a, int n, double c, double s, out barycentricinterpolant p, alglib.xparams _params) { p = new barycentricinterpolant(); polint.polynomialpow2bar(a, n, c, s, p.innerobj, _params); } public static void polynomialpow2bar(double[] a, out barycentricinterpolant p) { int n; double c; double s; p = new barycentricinterpolant(); n = ap.len(a); c = 0; s = 1; polint.polynomialpow2bar(a, n, c, s, p.innerobj, null); return; } public static void polynomialpow2bar(double[] a, out barycentricinterpolant p, alglib.xparams _params) { int n; double c; double s; p = new barycentricinterpolant(); n = ap.len(a); c = 0; s = 1; polint.polynomialpow2bar(a, n, c, s, p.innerobj, _params); return; } /************************************************************************* Lagrange interpolant: generation of the model on the general grid. This function has O(N^2) complexity. INPUT PARAMETERS: X - abscissas, array[0..N-1] Y - function values, array[0..N-1] N - number of points, N>=1 OUTPUT PARAMETERS P - barycentric model which represents Lagrange interpolant (see ratint unit info and BarycentricCalc() description for more information). -- ALGLIB -- Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ public static void polynomialbuild(double[] x, double[] y, int n, out barycentricinterpolant p) { p = new barycentricinterpolant(); polint.polynomialbuild(x, y, n, p.innerobj, null); } public static void polynomialbuild(double[] x, double[] y, int n, out barycentricinterpolant p, alglib.xparams _params) { p = new barycentricinterpolant(); polint.polynomialbuild(x, y, n, p.innerobj, _params); } public static void polynomialbuild(double[] x, double[] y, out barycentricinterpolant p) { int n; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'polynomialbuild': looks like one of arguments has wrong size"); p = new barycentricinterpolant(); n = ap.len(x); polint.polynomialbuild(x, y, n, p.innerobj, null); return; } public static void polynomialbuild(double[] x, double[] y, out barycentricinterpolant p, alglib.xparams _params) { int n; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'polynomialbuild': looks like one of arguments has wrong size"); p = new barycentricinterpolant(); n = ap.len(x); polint.polynomialbuild(x, y, n, p.innerobj, _params); return; } /************************************************************************* Lagrange interpolant: generation of the model on equidistant grid. This function has O(N) complexity. INPUT PARAMETERS: A - left boundary of [A,B] B - right boundary of [A,B] Y - function values at the nodes, array[0..N-1] N - number of points, N>=1; for N=1 a constant model is constructed. OUTPUT PARAMETERS P - barycentric model which represents Lagrange interpolant (see ratint unit info and BarycentricCalc() description for more information).
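EXAMPLE (illustrative sketch): the values below are hypothetical samples of f(x)=x^2 on [0,2]; evaluation through BarycentricCalc() from the ratint subpackage is assumed to be available:

    double[] y = new double[]{ 0.0, 0.25, 1.0, 2.25, 4.0 };    // f(x)=x^2 at x=0, 0.5, 1, 1.5, 2
    alglib.barycentricinterpolant p;
    alglib.polynomialbuildeqdist(0.0, 2.0, y, out p);           // N is taken from y.Length
    double v = alglib.barycentriccalc(p, 1.25);                 // exact for a degree-4 interpolant of x^2, v=1.5625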
-- ALGLIB -- Copyright 03.12.2009 by Bochkanov Sergey *************************************************************************/ public static void polynomialbuildeqdist(double a, double b, double[] y, int n, out barycentricinterpolant p) { p = new barycentricinterpolant(); polint.polynomialbuildeqdist(a, b, y, n, p.innerobj, null); } public static void polynomialbuildeqdist(double a, double b, double[] y, int n, out barycentricinterpolant p, alglib.xparams _params) { p = new barycentricinterpolant(); polint.polynomialbuildeqdist(a, b, y, n, p.innerobj, _params); } public static void polynomialbuildeqdist(double a, double b, double[] y, out barycentricinterpolant p) { int n; p = new barycentricinterpolant(); n = ap.len(y); polint.polynomialbuildeqdist(a, b, y, n, p.innerobj, null); return; } public static void polynomialbuildeqdist(double a, double b, double[] y, out barycentricinterpolant p, alglib.xparams _params) { int n; p = new barycentricinterpolant(); n = ap.len(y); polint.polynomialbuildeqdist(a, b, y, n, p.innerobj, _params); return; } /************************************************************************* Lagrange intepolant on Chebyshev grid (first kind). This function has O(N) complexity. INPUT PARAMETERS: A - left boundary of [A,B] B - right boundary of [A,B] Y - function values at the nodes, array[0..N-1], Y[I] = Y(0.5*(B+A) + 0.5*(B-A)*Cos(PI*(2*i+1)/(2*n))) N - number of points, N>=1 for N=1 a constant model is constructed. OUTPUT PARAMETERS P - barycentric model which represents Lagrange interpolant (see ratint unit info and BarycentricCalc() description for more information). -- ALGLIB -- Copyright 03.12.2009 by Bochkanov Sergey *************************************************************************/ public static void polynomialbuildcheb1(double a, double b, double[] y, int n, out barycentricinterpolant p) { p = new barycentricinterpolant(); polint.polynomialbuildcheb1(a, b, y, n, p.innerobj, null); } public static void polynomialbuildcheb1(double a, double b, double[] y, int n, out barycentricinterpolant p, alglib.xparams _params) { p = new barycentricinterpolant(); polint.polynomialbuildcheb1(a, b, y, n, p.innerobj, _params); } public static void polynomialbuildcheb1(double a, double b, double[] y, out barycentricinterpolant p) { int n; p = new barycentricinterpolant(); n = ap.len(y); polint.polynomialbuildcheb1(a, b, y, n, p.innerobj, null); return; } public static void polynomialbuildcheb1(double a, double b, double[] y, out barycentricinterpolant p, alglib.xparams _params) { int n; p = new barycentricinterpolant(); n = ap.len(y); polint.polynomialbuildcheb1(a, b, y, n, p.innerobj, _params); return; } /************************************************************************* Lagrange intepolant on Chebyshev grid (second kind). This function has O(N) complexity. INPUT PARAMETERS: A - left boundary of [A,B] B - right boundary of [A,B] Y - function values at the nodes, array[0..N-1], Y[I] = Y(0.5*(B+A) + 0.5*(B-A)*Cos(PI*i/(n-1))) N - number of points, N>=1 for N=1 a constant model is constructed. OUTPUT PARAMETERS P - barycentric model which represents Lagrange interpolant (see ratint unit info and BarycentricCalc() description for more information). 
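EXAMPLE (illustrative sketch): the data are hypothetical; the node positions follow the Y[I] formula above, and BarycentricCalc() from the ratint subpackage is assumed to be available for evaluation:

    int n = 5;
    double a = -1.0, b = 1.0;
    double[] y = new double[n];
    for(int i=0; i<n; i++)
    {
        double xi = 0.5*(b+a) + 0.5*(b-a)*Math.Cos(Math.PI*i/(n-1));   // Chebyshev nodes, second kind
        y[i] = Math.Exp(xi);                                           // sample f(x)=exp(x)
    }
    alglib.barycentricinterpolant p;
    alglib.polynomialbuildcheb2(a, b, y, n, out p);
    double v = alglib.barycentriccalc(p, 0.3);                         // approximately exp(0.3)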
-- ALGLIB -- Copyright 03.12.2009 by Bochkanov Sergey *************************************************************************/ public static void polynomialbuildcheb2(double a, double b, double[] y, int n, out barycentricinterpolant p) { p = new barycentricinterpolant(); polint.polynomialbuildcheb2(a, b, y, n, p.innerobj, null); } public static void polynomialbuildcheb2(double a, double b, double[] y, int n, out barycentricinterpolant p, alglib.xparams _params) { p = new barycentricinterpolant(); polint.polynomialbuildcheb2(a, b, y, n, p.innerobj, _params); } public static void polynomialbuildcheb2(double a, double b, double[] y, out barycentricinterpolant p) { int n; p = new barycentricinterpolant(); n = ap.len(y); polint.polynomialbuildcheb2(a, b, y, n, p.innerobj, null); return; } public static void polynomialbuildcheb2(double a, double b, double[] y, out barycentricinterpolant p, alglib.xparams _params) { int n; p = new barycentricinterpolant(); n = ap.len(y); polint.polynomialbuildcheb2(a, b, y, n, p.innerobj, _params); return; } /************************************************************************* Fast equidistant polynomial interpolation function with O(N) complexity INPUT PARAMETERS: A - left boundary of [A,B] B - right boundary of [A,B] F - function values, array[0..N-1] N - number of points on equidistant grid, N>=1 for N=1 a constant model is constructed. T - position where P(x) is calculated RESULT value of the Lagrange interpolant at T IMPORTANT this function provides fast interface which is not overflow-safe nor it is very precise. the best option is to use PolynomialBuildEqDist()/BarycentricCalc() subroutines unless you are pretty sure that your data will not result in overflow. -- ALGLIB -- Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ public static double polynomialcalceqdist(double a, double b, double[] f, int n, double t) { return polint.polynomialcalceqdist(a, b, f, n, t, null); } public static double polynomialcalceqdist(double a, double b, double[] f, int n, double t, alglib.xparams _params) { return polint.polynomialcalceqdist(a, b, f, n, t, _params); } public static double polynomialcalceqdist(double a, double b, double[] f, double t) { int n; n = ap.len(f); double result = polint.polynomialcalceqdist(a, b, f, n, t, null); return result; } public static double polynomialcalceqdist(double a, double b, double[] f, double t, alglib.xparams _params) { int n; n = ap.len(f); double result = polint.polynomialcalceqdist(a, b, f, n, t, _params); return result; } /************************************************************************* Fast polynomial interpolation function on Chebyshev points (first kind) with O(N) complexity. INPUT PARAMETERS: A - left boundary of [A,B] B - right boundary of [A,B] F - function values, array[0..N-1] N - number of points on Chebyshev grid (first kind), X[i] = 0.5*(B+A) + 0.5*(B-A)*Cos(PI*(2*i+1)/(2*n)) for N=1 a constant model is constructed. T - position where P(x) is calculated RESULT value of the Lagrange interpolant at T IMPORTANT this function provides fast interface which is not overflow-safe nor it is very precise. the best option is to use PolIntBuildCheb1()/BarycentricCalc() subroutines unless you are pretty sure that your data will not result in overflow. 
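EXAMPLE (illustrative sketch): hypothetical data; F must be sampled exactly at the Chebyshev (first kind) nodes given by the formula above:

    int n = 4;
    double a = 0.0, b = 1.0;
    double[] f = new double[n];
    for(int i=0; i<n; i++)
    {
        double xi = 0.5*(b+a) + 0.5*(b-a)*Math.Cos(Math.PI*(2*i+1)/(2*n));   // Chebyshev nodes, first kind
        f[i] = Math.Sin(xi);                                                 // sample f(x)=sin(x)
    }
    double v = alglib.polynomialcalccheb1(a, b, f, 0.5);                     // approximately sin(0.5), N taken from f.Length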
-- ALGLIB -- Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ public static double polynomialcalccheb1(double a, double b, double[] f, int n, double t) { return polint.polynomialcalccheb1(a, b, f, n, t, null); } public static double polynomialcalccheb1(double a, double b, double[] f, int n, double t, alglib.xparams _params) { return polint.polynomialcalccheb1(a, b, f, n, t, _params); } public static double polynomialcalccheb1(double a, double b, double[] f, double t) { int n; n = ap.len(f); double result = polint.polynomialcalccheb1(a, b, f, n, t, null); return result; } public static double polynomialcalccheb1(double a, double b, double[] f, double t, alglib.xparams _params) { int n; n = ap.len(f); double result = polint.polynomialcalccheb1(a, b, f, n, t, _params); return result; } /************************************************************************* Fast polynomial interpolation function on Chebyshev points (second kind) with O(N) complexity. INPUT PARAMETERS: A - left boundary of [A,B] B - right boundary of [A,B] F - function values, array[0..N-1] N - number of points on Chebyshev grid (second kind), X[i] = 0.5*(B+A) + 0.5*(B-A)*Cos(PI*i/(n-1)) for N=1 a constant model is constructed. T - position where P(x) is calculated RESULT value of the Lagrange interpolant at T IMPORTANT this function provides fast interface which is not overflow-safe nor it is very precise. the best option is to use PolIntBuildCheb2()/BarycentricCalc() subroutines unless you are pretty sure that your data will not result in overflow. -- ALGLIB -- Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ public static double polynomialcalccheb2(double a, double b, double[] f, int n, double t) { return polint.polynomialcalccheb2(a, b, f, n, t, null); } public static double polynomialcalccheb2(double a, double b, double[] f, int n, double t, alglib.xparams _params) { return polint.polynomialcalccheb2(a, b, f, n, t, _params); } public static double polynomialcalccheb2(double a, double b, double[] f, double t) { int n; n = ap.len(f); double result = polint.polynomialcalccheb2(a, b, f, n, t, null); return result; } public static double polynomialcalccheb2(double a, double b, double[] f, double t, alglib.xparams _params) { int n; n = ap.len(f); double result = polint.polynomialcalccheb2(a, b, f, n, t, _params); return result; } } public partial class alglib { /************************************************************************* Polynomial fitting report: TaskRCond reciprocal of task's condition number RMSError RMS error AvgError average error AvgRelError average relative error (for non-zero Y[I]) MaxError maximum error *************************************************************************/ public class polynomialfitreport : alglibobject { // // Public declarations // public double taskrcond { get { return _innerobj.taskrcond; } set { _innerobj.taskrcond = value; } } public double rmserror { get { return _innerobj.rmserror; } set { _innerobj.rmserror = value; } } public double avgerror { get { return _innerobj.avgerror; } set { _innerobj.avgerror = value; } } public double avgrelerror { get { return _innerobj.avgrelerror; } set { _innerobj.avgrelerror = value; } } public double maxerror { get { return _innerobj.maxerror; } set { _innerobj.maxerror = value; } } public polynomialfitreport() { _innerobj = new lsfit.polynomialfitreport(); } public override alglib.alglibobject make_copy() { 
return new polynomialfitreport((lsfit.polynomialfitreport)_innerobj.make_copy()); } // // Although some of declarations below are public, you should not use them // They are intended for internal use only // private lsfit.polynomialfitreport _innerobj; public lsfit.polynomialfitreport innerobj { get { return _innerobj; } } public polynomialfitreport(lsfit.polynomialfitreport obj) { _innerobj = obj; } } /************************************************************************* Barycentric fitting report: RMSError RMS error AvgError average error AvgRelError average relative error (for non-zero Y[I]) MaxError maximum error TaskRCond reciprocal of task's condition number *************************************************************************/ public class barycentricfitreport : alglibobject { // // Public declarations // public double taskrcond { get { return _innerobj.taskrcond; } set { _innerobj.taskrcond = value; } } public int dbest { get { return _innerobj.dbest; } set { _innerobj.dbest = value; } } public double rmserror { get { return _innerobj.rmserror; } set { _innerobj.rmserror = value; } } public double avgerror { get { return _innerobj.avgerror; } set { _innerobj.avgerror = value; } } public double avgrelerror { get { return _innerobj.avgrelerror; } set { _innerobj.avgrelerror = value; } } public double maxerror { get { return _innerobj.maxerror; } set { _innerobj.maxerror = value; } } public barycentricfitreport() { _innerobj = new lsfit.barycentricfitreport(); } public override alglib.alglibobject make_copy() { return new barycentricfitreport((lsfit.barycentricfitreport)_innerobj.make_copy()); } // // Although some of declarations below are public, you should not use them // They are intended for internal use only // private lsfit.barycentricfitreport _innerobj; public lsfit.barycentricfitreport innerobj { get { return _innerobj; } } public barycentricfitreport(lsfit.barycentricfitreport obj) { _innerobj = obj; } } /************************************************************************* Least squares fitting report. This structure contains informational fields which are set by fitting functions provided by this unit. Different functions initialize different sets of fields, so you should read documentation on specific function you used in order to know which fields are initialized. TaskRCond reciprocal of task's condition number IterationsCount number of internal iterations VarIdx if user-supplied gradient contains errors which were detected by nonlinear fitter, this field is set to index of the first component of gradient which is suspected to be spoiled by bugs. RMSError RMS error AvgError average error AvgRelError average relative error (for non-zero Y[I]) MaxError maximum error WRMSError weighted RMS error CovPar covariance matrix for parameters, filled by some solvers ErrPar vector of errors in parameters, filled by some solvers ErrCurve vector of fit errors - variability of the best-fit curve, filled by some solvers. Noise vector of per-point noise estimates, filled by some solvers. R2 coefficient of determination (non-weighted, non-adjusted), filled by some solvers. 
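EXAMPLE (illustrative sketch): the report object is returned by a fitting routine and only the fields documented for that routine are set; here LogisticFit4() (described further below) is used purely for illustration, with hypothetical arrays x and y:

    double a, b, c, d;
    alglib.lsfitreport rep;
    alglib.logisticfit4(x, y, x.Length, out a, out b, out c, out d, out rep);
    System.Console.WriteLine("rms={0} r2={1}", rep.rmserror, rep.r2);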
*************************************************************************/ public class lsfitreport : alglibobject { // // Public declarations // public double taskrcond { get { return _innerobj.taskrcond; } set { _innerobj.taskrcond = value; } } public int iterationscount { get { return _innerobj.iterationscount; } set { _innerobj.iterationscount = value; } } public int varidx { get { return _innerobj.varidx; } set { _innerobj.varidx = value; } } public double rmserror { get { return _innerobj.rmserror; } set { _innerobj.rmserror = value; } } public double avgerror { get { return _innerobj.avgerror; } set { _innerobj.avgerror = value; } } public double avgrelerror { get { return _innerobj.avgrelerror; } set { _innerobj.avgrelerror = value; } } public double maxerror { get { return _innerobj.maxerror; } set { _innerobj.maxerror = value; } } public double wrmserror { get { return _innerobj.wrmserror; } set { _innerobj.wrmserror = value; } } public double[,] covpar { get { return _innerobj.covpar; } set { _innerobj.covpar = value; } } public double[] errpar { get { return _innerobj.errpar; } set { _innerobj.errpar = value; } } public double[] errcurve { get { return _innerobj.errcurve; } set { _innerobj.errcurve = value; } } public double[] noise { get { return _innerobj.noise; } set { _innerobj.noise = value; } } public double r2 { get { return _innerobj.r2; } set { _innerobj.r2 = value; } } public lsfitreport() { _innerobj = new lsfit.lsfitreport(); } public override alglib.alglibobject make_copy() { return new lsfitreport((lsfit.lsfitreport)_innerobj.make_copy()); } // // Although some of declarations below are public, you should not use them // They are intended for internal use only // private lsfit.lsfitreport _innerobj; public lsfit.lsfitreport innerobj { get { return _innerobj; } } public lsfitreport(lsfit.lsfitreport obj) { _innerobj = obj; } } /************************************************************************* Nonlinear fitter. You should use ALGLIB functions to work with fitter. Never try to access its fields directly! 
*************************************************************************/ public class lsfitstate : alglibobject { // // Public declarations // public bool needf { get { return _innerobj.needf; } set { _innerobj.needf = value; } } public bool needfg { get { return _innerobj.needfg; } set { _innerobj.needfg = value; } } public bool needfgh { get { return _innerobj.needfgh; } set { _innerobj.needfgh = value; } } public bool xupdated { get { return _innerobj.xupdated; } set { _innerobj.xupdated = value; } } public double[] c { get { return _innerobj.c; } } public double f { get { return _innerobj.f; } set { _innerobj.f = value; } } public double[] g { get { return _innerobj.g; } } public double[,] h { get { return _innerobj.h; } } public double[] x { get { return _innerobj.x; } } public lsfitstate() { _innerobj = new lsfit.lsfitstate(); } public override alglib.alglibobject make_copy() { return new lsfitstate((lsfit.lsfitstate)_innerobj.make_copy()); } // // Although some of declarations below are public, you should not use them // They are intended for internal use only // private lsfit.lsfitstate _innerobj; public lsfit.lsfitstate innerobj { get { return _innerobj; } } public lsfitstate(lsfit.lsfitstate obj) { _innerobj = obj; } } /************************************************************************* This subroutine fits piecewise linear curve to points with Ramer-Douglas- Peucker algorithm, which stops after generating specified number of linear sections. IMPORTANT: * it does NOT perform least-squares fitting; it builds curve, but this curve does not minimize some least squares metric. See description of RDP algorithm (say, in Wikipedia) for more details on WHAT is performed. * this function does NOT work with parametric curves (i.e. curves which can be represented as {X(t),Y(t)}). It works with curves which can be represented as Y(X). Thus, it is impossible to model figures like circles with this function. If you want to work with parametric curves, you should use ParametricRDPFixed() function provided by "Parametric" subpackage of "Interpolation" package. INPUT PARAMETERS: X - array of X-coordinates: * at least N elements * can be unordered (points are automatically sorted) * this function may accept non-distinct X (see below for more information on handling of such inputs) Y - array of Y-coordinates: * at least N elements N - number of elements in X/Y M - desired number of sections: * at most M sections are generated by this function * less than M sections can be generated if we have N<M ... N - number of points, N>0 * if given, only leading N elements of X/Y are used * if not given, automatically determined from sizes of X/Y M - number of basis functions (= polynomial_degree + 1), M>=1 OUTPUT PARAMETERS: Info- same format as in LSFitLinearW() subroutine: * Info>0 task is solved * Info<=0 an error occurred: -4 means inconvergence of internal SVD P - interpolant in barycentric form. Rep - report, same format as in LSFitLinearW() subroutine. Following fields are set: * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y).
* AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED -- ALGLIB PROJECT -- Copyright 10.12.2009 by Bochkanov Sergey *************************************************************************/ public static void polynomialfit(double[] x, double[] y, int n, int m, out int info, out barycentricinterpolant p, out polynomialfitreport rep) { info = 0; p = new barycentricinterpolant(); rep = new polynomialfitreport(); lsfit.polynomialfit(x, y, n, m, ref info, p.innerobj, rep.innerobj, null); } public static void polynomialfit(double[] x, double[] y, int n, int m, out int info, out barycentricinterpolant p, out polynomialfitreport rep, alglib.xparams _params) { info = 0; p = new barycentricinterpolant(); rep = new polynomialfitreport(); lsfit.polynomialfit(x, y, n, m, ref info, p.innerobj, rep.innerobj, _params); } public static void polynomialfit(double[] x, double[] y, int m, out int info, out barycentricinterpolant p, out polynomialfitreport rep) { int n; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'polynomialfit': looks like one of arguments has wrong size"); info = 0; p = new barycentricinterpolant(); rep = new polynomialfitreport(); n = ap.len(x); lsfit.polynomialfit(x, y, n, m, ref info, p.innerobj, rep.innerobj, null); return; } public static void polynomialfit(double[] x, double[] y, int m, out int info, out barycentricinterpolant p, out polynomialfitreport rep, alglib.xparams _params) { int n; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'polynomialfit': looks like one of arguments has wrong size"); info = 0; p = new barycentricinterpolant(); rep = new polynomialfitreport(); n = ap.len(x); lsfit.polynomialfit(x, y, n, m, ref info, p.innerobj, rep.innerobj, _params); return; } /************************************************************************* Weighted fitting by polynomials in barycentric form, with constraints on function values or first derivatives. Small regularizing term is used when solving constrained tasks (to improve stability). Task is linear, so linear least squares solver is used. Complexity of this computational scheme is O(N*M^2), mostly dominated by least squares solver SEE ALSO: PolynomialFit() NOTES: you can convert P from barycentric form to the power or Chebyshev basis with PolynomialBar2Pow() or PolynomialBar2Cheb() functions from POLINT subpackage. ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: X - points, array[0..N-1]. Y - function values, array[0..N-1]. W - weights, array[0..N-1] Each summand in square sum of approximation deviations from given values is multiplied by the square of corresponding weight. Fill it by 1's if you don't want to solve weighted task. N - number of points, N>0. * if given, only leading N elements of X/Y/W are used * if not given, automatically determined from sizes of X/Y/W XC - points where polynomial values/derivatives are constrained, array[0..K-1]. 
YC - values of constraints, array[0..K-1] DC - array[0..K-1], types of constraints: * DC[i]=0 means that P(XC[i])=YC[i] * DC[i]=1 means that P'(XC[i])=YC[i] SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS K - number of constraints, 0<=K<M. K=0 means no constraints (XC/YC/DC are not used in such cases) M - number of basis functions (= polynomial_degree + 1), M>=1 OUTPUT PARAMETERS: Info- same format as in LSFitLinearW() subroutine: * Info>0 task is solved * Info<=0 an error occurred: -4 means inconvergence of internal SVD -3 means inconsistent constraints P - interpolant in barycentric form. Rep - report, same format as in LSFitLinearW() subroutine. Following fields are set: * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED IMPORTANT: this subroutine doesn't calculate task's condition number for K<>0. SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: Setting constraints can lead to undesired results, like ill-conditioned behavior, or inconsistency being detected. From the other side, it allows us to improve quality of the fit. Here we summarize our experience with constrained regression splines: * even simple constraints can be inconsistent, see Wikipedia article on this subject: http://en.wikipedia.org/wiki/Birkhoff_interpolation * the greater is M (given fixed constraints), the more chances that constraints will be consistent * in the general case, consistency of constraints is NOT GUARANTEED. * in one special case, however, we can guarantee consistency. This case is: M>1 and constraints on the function values (NOT DERIVATIVES) Our final recommendation is to use constraints WHEN AND ONLY when you can't solve your task without them. Anything beyond special cases given above is not guaranteed and may result in inconsistency. -- ALGLIB PROJECT -- Copyright 10.12.2009 by Bochkanov Sergey *************************************************************************/ public static void polynomialfitwc(double[] x, double[] y, double[] w, int n, double[] xc, double[] yc, int[] dc, int k, int m, out int info, out barycentricinterpolant p, out polynomialfitreport rep) { info = 0; p = new barycentricinterpolant(); rep = new polynomialfitreport(); lsfit.polynomialfitwc(x, y, w, n, xc, yc, dc, k, m, ref info, p.innerobj, rep.innerobj, null); } public static void polynomialfitwc(double[] x, double[] y, double[] w, int n, double[] xc, double[] yc, int[] dc, int k, int m, out int info, out barycentricinterpolant p, out polynomialfitreport rep, alglib.xparams _params) { info = 0; p = new barycentricinterpolant(); rep = new polynomialfitreport(); lsfit.polynomialfitwc(x, y, w, n, xc, yc, dc, k, m, ref info, p.innerobj, rep.innerobj, _params); } public static void polynomialfitwc(double[] x, double[] y, double[] w, double[] xc, double[] yc, int[] dc, int m, out int info, out barycentricinterpolant p, out polynomialfitreport rep) { int n; int k; if( (ap.len(x)!=ap.len(y)) || (ap.len(x)!=ap.len(w))) throw new alglibexception("Error while calling 'polynomialfitwc': looks like one of arguments has wrong size"); if( (ap.len(xc)!=ap.len(yc)) || (ap.len(xc)!=ap.len(dc))) throw new alglibexception("Error while calling 'polynomialfitwc': looks like one of arguments has wrong size"); info = 0; p = new barycentricinterpolant(); rep = new polynomialfitreport(); n = ap.len(x); k = ap.len(xc); lsfit.polynomialfitwc(x, y, w, n, xc, yc, dc, k, m, ref info, p.innerobj, rep.innerobj, null); return; } public static void polynomialfitwc(double[] x, double[] y, double[] w, double[] xc, double[] yc, int[] dc, int
m, out int info, out barycentricinterpolant p, out polynomialfitreport rep, alglib.xparams _params) { int n; int k; if( (ap.len(x)!=ap.len(y)) || (ap.len(x)!=ap.len(w))) throw new alglibexception("Error while calling 'polynomialfitwc': looks like one of arguments has wrong size"); if( (ap.len(xc)!=ap.len(yc)) || (ap.len(xc)!=ap.len(dc))) throw new alglibexception("Error while calling 'polynomialfitwc': looks like one of arguments has wrong size"); info = 0; p = new barycentricinterpolant(); rep = new polynomialfitreport(); n = ap.len(x); k = ap.len(xc); lsfit.polynomialfitwc(x, y, w, n, xc, yc, dc, k, m, ref info, p.innerobj, rep.innerobj, _params); return; } /************************************************************************* This function calculates value of four-parameter logistic (4PL) model at specified point X. 4PL model has following form: F(x|A,B,C,D) = D+(A-D)/(1+Power(x/C,B)) INPUT PARAMETERS: X - current point, X>=0: * zero X is correctly handled even for B<=0 * negative X results in exception. A, B, C, D- parameters of 4PL model: * A is unconstrained * B is unconstrained; zero or negative values are handled correctly. * C>0, non-positive value results in exception * D is unconstrained RESULT: model value at X NOTE: if B=0, denominator is assumed to be equal to 2.0 even for zero X (strictly speaking, 0^0 is undefined). NOTE: this function also throws exception if all input parameters are correct, but overflow was detected during calculations. NOTE: this function performs a lot of checks; if you need really high performance, consider evaluating model yourself, without checking for degenerate cases. -- ALGLIB PROJECT -- Copyright 14.05.2014 by Bochkanov Sergey *************************************************************************/ public static double logisticcalc4(double x, double a, double b, double c, double d) { return lsfit.logisticcalc4(x, a, b, c, d, null); } public static double logisticcalc4(double x, double a, double b, double c, double d, alglib.xparams _params) { return lsfit.logisticcalc4(x, a, b, c, d, _params); } /************************************************************************* This function calculates value of five-parameter logistic (5PL) model at specified point X. 5PL model has following form: F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G) INPUT PARAMETERS: X - current point, X>=0: * zero X is correctly handled even for B<=0 * negative X results in exception. A, B, C, D, G- parameters of 5PL model: * A is unconstrained * B is unconstrained; zero or negative values are handled correctly. * C>0, non-positive value results in exception * D is unconstrained * G>0, non-positive value results in exception RESULT: model value at X NOTE: if B=0, denominator is assumed to be equal to Power(2.0,G) even for zero X (strictly speaking, 0^0 is undefined). NOTE: this function also throws exception if all input parameters are correct, but overflow was detected during calculations. NOTE: this function performs a lot of checks; if you need really high performance, consider evaluating model yourself, without checking for degenerate cases. 
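EXAMPLE (illustrative sketch): parameter values below are hypothetical and chosen to satisfy the requirements above (X>=0, C>0, G>0):

    double v4 = alglib.logisticcalc4(10.0, 0.1, 1.5, 25.0, 3.0);        // 4PL: D+(A-D)/(1+Power(x/C,B))
    double v5 = alglib.logisticcalc5(10.0, 0.1, 1.5, 25.0, 3.0, 0.8);   // 5PL: D+(A-D)/Power(1+Power(x/C,B),G)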
-- ALGLIB PROJECT -- Copyright 14.05.2014 by Bochkanov Sergey *************************************************************************/ public static double logisticcalc5(double x, double a, double b, double c, double d, double g) { return lsfit.logisticcalc5(x, a, b, c, d, g, null); } public static double logisticcalc5(double x, double a, double b, double c, double d, double g, alglib.xparams _params) { return lsfit.logisticcalc5(x, a, b, c, d, g, _params); } /************************************************************************* This function fits four-parameter logistic (4PL) model to data provided by user. 4PL model has following form: F(x|A,B,C,D) = D+(A-D)/(1+Power(x/C,B)) Here: * A, D - unconstrained (see LogisticFit4EC() for constrained 4PL) * B>=0 * C>0 IMPORTANT: output of this function is constrained in such way that B>0. Because 4PL model is symmetric with respect to B, there is no need to explore B<0. Constraining B makes algorithm easier to stabilize and debug. Users who for some reason prefer to work with negative B's should transform output themselves (swap A and D, replace B by -B). 4PL fitting is implemented as follows: * we perform small number of restarts from random locations which helps to solve problem of bad local extrema. Locations are only partially random - we use input data to determine good initial guess, but we include controlled amount of randomness. * we perform Levenberg-Marquardt fitting with very tight constraints on parameters B and C - it allows us to find good initial guess for the second stage without risk of running into "flat spot". * second Levenberg-Marquardt round is performed without excessive constraints. Results from the previous round are used as initial guess. * after fitting is done, we compare results with best values found so far, rewrite "best solution" if needed, and move to next random location. Overall algorithm is very stable and is not prone to bad local extrema. Furthermore, it automatically scales when input data have very large or very small range. INPUT PARAMETERS: X - array[N], stores X-values. MUST include only non-negative numbers (but may include zero values). Can be unsorted. Y - array[N], values to fit. N - number of points. If N is less than length of X/Y, only leading N elements are used. OUTPUT PARAMETERS: A, B, C, D- parameters of 4PL model Rep - fitting report. This structure has many fields, but ONLY ONES LISTED BELOW ARE SET: * Rep.IterationsCount - number of iterations performed * Rep.RMSError - root-mean-square error * Rep.AvgError - average absolute error * Rep.AvgRelError - average relative error (calculated for non-zero Y-values) * Rep.MaxError - maximum absolute error * Rep.R2 - coefficient of determination, R-squared. This coefficient is calculated as R2=1-RSS/TSS (in case of nonlinear regression there are multiple ways to define R2, each of them giving different results). NOTE: for stability reasons the B parameter is restricted by [1/1000,1000] range. It prevents algorithm from making trial steps deep into the area of bad parameters. NOTE: after you obtained coefficients, you can evaluate model with LogisticCalc4() function. NOTE: if you need better control over fitting process than provided by this function, you may use LogisticFit45X(). NOTE: step is automatically scaled according to scale of parameters being fitted before we compare its length with EpsX. Thus, this function can be used to fit data with very small or very large values without changing EpsX. 
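EXAMPLE (illustrative sketch): the data below are hypothetical; X values are non-negative as required, and the fitted coefficients are then fed back into LogisticCalc4():

    double[] x = new double[]{ 0, 1, 2, 4, 8, 16, 32 };
    double[] y = new double[]{ 0.10, 0.21, 0.38, 0.92, 1.65, 2.40, 2.78 };
    double a, b, c, d;
    alglib.lsfitreport rep;
    alglib.logisticfit4(x, y, x.Length, out a, out b, out c, out d, out rep);
    double yhat = alglib.logisticcalc4(5.0, a, b, c, d);   // model prediction at a new point
    // rep.rmserror, rep.avgerror, rep.avgrelerror, rep.maxerror, rep.r2, rep.iterationscount are set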
-- ALGLIB PROJECT -- Copyright 14.02.2014 by Bochkanov Sergey *************************************************************************/ public static void logisticfit4(double[] x, double[] y, int n, out double a, out double b, out double c, out double d, out lsfitreport rep) { a = 0; b = 0; c = 0; d = 0; rep = new lsfitreport(); lsfit.logisticfit4(x, y, n, ref a, ref b, ref c, ref d, rep.innerobj, null); } public static void logisticfit4(double[] x, double[] y, int n, out double a, out double b, out double c, out double d, out lsfitreport rep, alglib.xparams _params) { a = 0; b = 0; c = 0; d = 0; rep = new lsfitreport(); lsfit.logisticfit4(x, y, n, ref a, ref b, ref c, ref d, rep.innerobj, _params); } /************************************************************************* This function fits four-parameter logistic (4PL) model to data provided by user, with optional constraints on parameters A and D. 4PL model has following form: F(x|A,B,C,D) = D+(A-D)/(1+Power(x/C,B)) Here: * A, D - with optional equality constraints * B>=0 * C>0 IMPORTANT: output of this function is constrained in such way that B>0. Because 4PL model is symmetric with respect to B, there is no need to explore B<0. Constraining B makes algorithm easier to stabilize and debug. Users who for some reason prefer to work with negative B's should transform output themselves (swap A and D, replace B by -B). 4PL fitting is implemented as follows: * we perform small number of restarts from random locations which helps to solve problem of bad local extrema. Locations are only partially random - we use input data to determine good initial guess, but we include controlled amount of randomness. * we perform Levenberg-Marquardt fitting with very tight constraints on parameters B and C - it allows us to find good initial guess for the second stage without risk of running into "flat spot". * second Levenberg-Marquardt round is performed without excessive constraints. Results from the previous round are used as initial guess. * after fitting is done, we compare results with best values found so far, rewrite "best solution" if needed, and move to next random location. Overall algorithm is very stable and is not prone to bad local extrema. Furthermore, it automatically scales when input data have very large or very small range. INPUT PARAMETERS: X - array[N], stores X-values. MUST include only non-negative numbers (but may include zero values). Can be unsorted. Y - array[N], values to fit. N - number of points. If N is less than length of X/Y, only leading N elements are used. CnstrLeft- optional equality constraint for model value at the left boundary (at X=0). Specify NAN (Not-a-Number) if you do not need constraint on the model value at X=0 (in C++ you can pass alglib::fp_nan as parameter, in C# it will be Double.NaN). See below, section "EQUALITY CONSTRAINTS" for more information about constraints. CnstrRight- optional equality constraint for model value at X=infinity. Specify NAN (Not-a-Number) if you do not need constraint on the model value (in C++ you can pass alglib::fp_nan as parameter, in C# it will be Double.NaN). See below, section "EQUALITY CONSTRAINTS" for more information about constraints. OUTPUT PARAMETERS: A, B, C, D- parameters of 4PL model Rep - fitting report. 
This structure has many fields, but ONLY ONES LISTED BELOW ARE SET: * Rep.IterationsCount - number of iterations performed * Rep.RMSError - root-mean-square error * Rep.AvgError - average absolute error * Rep.AvgRelError - average relative error (calculated for non-zero Y-values) * Rep.MaxError - maximum absolute error * Rep.R2 - coefficient of determination, R-squared. This coefficient is calculated as R2=1-RSS/TSS (in case of nonlinear regression there are multiple ways to define R2, each of them giving different results). NOTE: for stability reasons the B parameter is restricted by [1/1000,1000] range. It prevents algorithm from making trial steps deep into the area of bad parameters. NOTE: after you obtained coefficients, you can evaluate model with LogisticCalc4() function. NOTE: if you need better control over fitting process than provided by this function, you may use LogisticFit45X(). NOTE: step is automatically scaled according to scale of parameters being fitted before we compare its length with EpsX. Thus, this function can be used to fit data with very small or very large values without changing EpsX. EQUALITY CONSTRAINTS ON PARAMETERS 4PL/5PL solver supports equality constraints on model values at the left boundary (X=0) and right boundary (X=infinity). These constraints are completely optional and you can specify both of them, only one - or no constraints at all. Parameter CnstrLeft contains left constraint (or NAN for unconstrained fitting), and CnstrRight contains right one. For 4PL, left constraint ALWAYS corresponds to parameter A, and right one is ALWAYS constraint on D. That's because 4PL model is normalized in such way that B>=0. -- ALGLIB PROJECT -- Copyright 14.02.2014 by Bochkanov Sergey *************************************************************************/ public static void logisticfit4ec(double[] x, double[] y, int n, double cnstrleft, double cnstrright, out double a, out double b, out double c, out double d, out lsfitreport rep) { a = 0; b = 0; c = 0; d = 0; rep = new lsfitreport(); lsfit.logisticfit4ec(x, y, n, cnstrleft, cnstrright, ref a, ref b, ref c, ref d, rep.innerobj, null); } public static void logisticfit4ec(double[] x, double[] y, int n, double cnstrleft, double cnstrright, out double a, out double b, out double c, out double d, out lsfitreport rep, alglib.xparams _params) { a = 0; b = 0; c = 0; d = 0; rep = new lsfitreport(); lsfit.logisticfit4ec(x, y, n, cnstrleft, cnstrright, ref a, ref b, ref c, ref d, rep.innerobj, _params); } /************************************************************************* This function fits five-parameter logistic (5PL) model to data provided by user. 5PL model has following form: F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G) Here: * A, D - unconstrained * B - unconstrained * C>0 * G>0 IMPORTANT: unlike in 4PL fitting, output of this function is NOT constrained in such way that B is guaranteed to be positive. Furthermore, unlike 4PL, 5PL model is NOT symmetric with respect to B, so you can NOT transform model to equivalent one, with B having desired sign (>0 or <0). 5PL fitting is implemented as follows: * we perform small number of restarts from random locations which helps to solve problem of bad local extrema. Locations are only partially random - we use input data to determine good initial guess, but we include controlled amount of randomness. 
* we perform Levenberg-Marquardt fitting with very tight constraints on parameters B and C - it allows us to find good initial guess for the second stage without risk of running into "flat spot". Parameter G is fixed at G=1. * second Levenberg-Marquardt round is performed without excessive constraints on B and C, but with G still equal to 1. Results from the previous round are used as initial guess. * third Levenberg-Marquardt round relaxes constraints on G and tries two different models - one with B>0 and one with B<0. * after fitting is done, we compare results with best values found so far, rewrite "best solution" if needed, and move to next random location. Overall algorithm is very stable and is not prone to bad local extrema. Furthermore, it automatically scales when input data have very large or very small range. INPUT PARAMETERS: X - array[N], stores X-values. MUST include only non-negative numbers (but may include zero values). Can be unsorted. Y - array[N], values to fit. N - number of points. If N is less than length of X/Y, only leading N elements are used. OUTPUT PARAMETERS: A,B,C,D,G- parameters of 5PL model Rep - fitting report. This structure has many fields, but ONLY ONES LISTED BELOW ARE SET: * Rep.IterationsCount - number of iterations performed * Rep.RMSError - root-mean-square error * Rep.AvgError - average absolute error * Rep.AvgRelError - average relative error (calculated for non-zero Y-values) * Rep.MaxError - maximum absolute error * Rep.R2 - coefficient of determination, R-squared. This coefficient is calculated as R2=1-RSS/TSS (in case of nonlinear regression there are multiple ways to define R2, each of them giving different results). NOTE: for better stability B parameter is restricted by [+-1/1000,+-1000] range, and G is restricted by [1/10,10] range. It prevents algorithm from making trial steps deep into the area of bad parameters. NOTE: after you obtained coefficients, you can evaluate model with LogisticCalc5() function. NOTE: if you need better control over fitting process than provided by this function, you may use LogisticFit45X(). NOTE: step is automatically scaled according to scale of parameters being fitted before we compare its length with EpsX. Thus, this function can be used to fit data with very small or very large values without changing EpsX. -- ALGLIB PROJECT -- Copyright 14.02.2014 by Bochkanov Sergey *************************************************************************/ public static void logisticfit5(double[] x, double[] y, int n, out double a, out double b, out double c, out double d, out double g, out lsfitreport rep) { a = 0; b = 0; c = 0; d = 0; g = 0; rep = new lsfitreport(); lsfit.logisticfit5(x, y, n, ref a, ref b, ref c, ref d, ref g, rep.innerobj, null); } public static void logisticfit5(double[] x, double[] y, int n, out double a, out double b, out double c, out double d, out double g, out lsfitreport rep, alglib.xparams _params) { a = 0; b = 0; c = 0; d = 0; g = 0; rep = new lsfitreport(); lsfit.logisticfit5(x, y, n, ref a, ref b, ref c, ref d, ref g, rep.innerobj, _params); } /************************************************************************* This function fits five-parameter logistic (5PL) model to data provided by user, subject to optional equality constraints on parameters A and D. 
5PL model has following form: F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G) Here: * A, D - with optional equality constraints * B - unconstrained * C>0 * G>0 IMPORTANT: unlike in 4PL fitting, output of this function is NOT constrained in such way that B is guaranteed to be positive. Furthermore, unlike 4PL, 5PL model is NOT symmetric with respect to B, so you can NOT transform model to equivalent one, with B having desired sign (>0 or <0). 5PL fitting is implemented as follows: * we perform small number of restarts from random locations which helps to solve problem of bad local extrema. Locations are only partially random - we use input data to determine good initial guess, but we include controlled amount of randomness. * we perform Levenberg-Marquardt fitting with very tight constraints on parameters B and C - it allows us to find good initial guess for the second stage without risk of running into "flat spot". Parameter G is fixed at G=1. * second Levenberg-Marquardt round is performed without excessive constraints on B and C, but with G still equal to 1. Results from the previous round are used as initial guess. * third Levenberg-Marquardt round relaxes constraints on G and tries two different models - one with B>0 and one with B<0. * after fitting is done, we compare results with best values found so far, rewrite "best solution" if needed, and move to next random location. Overall algorithm is very stable and is not prone to bad local extrema. Furthermore, it automatically scales when input data have very large or very small range. INPUT PARAMETERS: X - array[N], stores X-values. MUST include only non-negative numbers (but may include zero values). Can be unsorted. Y - array[N], values to fit. N - number of points. If N is less than length of X/Y, only leading N elements are used. CnstrLeft- optional equality constraint for model value at the left boundary (at X=0). Specify NAN (Not-a-Number) if you do not need constraint on the model value at X=0 (in C++ you can pass alglib::fp_nan as parameter, in C# it will be Double.NaN). See below, section "EQUALITY CONSTRAINTS" for more information about constraints. CnstrRight- optional equality constraint for model value at X=infinity. Specify NAN (Not-a-Number) if you do not need constraint on the model value (in C++ you can pass alglib::fp_nan as parameter, in C# it will be Double.NaN). See below, section "EQUALITY CONSTRAINTS" for more information about constraints. OUTPUT PARAMETERS: A,B,C,D,G- parameters of 5PL model Rep - fitting report. This structure has many fields, but ONLY ONES LISTED BELOW ARE SET: * Rep.IterationsCount - number of iterations performed * Rep.RMSError - root-mean-square error * Rep.AvgError - average absolute error * Rep.AvgRelError - average relative error (calculated for non-zero Y-values) * Rep.MaxError - maximum absolute error * Rep.R2 - coefficient of determination, R-squared. This coefficient is calculated as R2=1-RSS/TSS (in case of nonlinear regression there are multiple ways to define R2, each of them giving different results). NOTE: for better stability B parameter is restricted by [+-1/1000,+-1000] range, and G is restricted by [1/10,10] range. It prevents algorithm from making trial steps deep into the area of bad parameters. NOTE: after you obtained coefficients, you can evaluate model with LogisticCalc5() function. NOTE: if you need better control over fitting process than provided by this function, you may use LogisticFit45X(). 
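NOTE: the call below is only an illustrative sketch, not an excerpt from the
      ALGLIB manual. Data values are made up and variable names are
      placeholders; the left boundary value is constrained to 8.0 while the
      right boundary is left unconstrained:

          double[] x = new double[]{0.1, 0.3, 1.0, 3.0, 10.0, 30.0};
          double[] y = new double[]{8.1, 7.6, 5.2, 3.1, 1.4, 1.0};
          double a, b, c, d, g;
          alglib.lsfitreport rep;
          // constrain model value at X=0 to 8.0, leave X=infinity free
          alglib.logisticfit5ec(x, y, 6, 8.0, Double.NaN,
              out a, out b, out c, out d, out g, out rep);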
NOTE: step is automatically scaled according to scale of parameters being fitted before we compare its length with EpsX. Thus, this function can be used to fit data with very small or very large values without changing EpsX. EQUALITY CONSTRAINTS ON PARAMETERS 5PL solver supports equality constraints on model values at the left boundary (X=0) and right boundary (X=infinity). These constraints are completely optional and you can specify both of them, only one - or no constraints at all. Parameter CnstrLeft contains left constraint (or NAN for unconstrained fitting), and CnstrRight contains right one. Unlike 4PL one, 5PL model is NOT symmetric with respect to change in sign of B. Thus, negative B's are possible, and left constraint may constrain parameter A (for positive B's) - or parameter D (for negative B's). Similarly changes meaning of right constraint. You do not have to decide what parameter to constrain - algorithm will automatically determine correct parameters as fitting progresses. However, question highlighted above is important when you interpret fitting results. -- ALGLIB PROJECT -- Copyright 14.02.2014 by Bochkanov Sergey *************************************************************************/ public static void logisticfit5ec(double[] x, double[] y, int n, double cnstrleft, double cnstrright, out double a, out double b, out double c, out double d, out double g, out lsfitreport rep) { a = 0; b = 0; c = 0; d = 0; g = 0; rep = new lsfitreport(); lsfit.logisticfit5ec(x, y, n, cnstrleft, cnstrright, ref a, ref b, ref c, ref d, ref g, rep.innerobj, null); } public static void logisticfit5ec(double[] x, double[] y, int n, double cnstrleft, double cnstrright, out double a, out double b, out double c, out double d, out double g, out lsfitreport rep, alglib.xparams _params) { a = 0; b = 0; c = 0; d = 0; g = 0; rep = new lsfitreport(); lsfit.logisticfit5ec(x, y, n, cnstrleft, cnstrright, ref a, ref b, ref c, ref d, ref g, rep.innerobj, _params); } /************************************************************************* This is "expert" 4PL/5PL fitting function, which can be used if you need better control over fitting process than provided by LogisticFit4() or LogisticFit5(). This function fits model of the form F(x|A,B,C,D) = D+(A-D)/(1+Power(x/C,B)) (4PL model) or F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G) (5PL model) Here: * A, D - unconstrained * B>=0 for 4PL, unconstrained for 5PL * C>0 * G>0 (if present) INPUT PARAMETERS: X - array[N], stores X-values. MUST include only non-negative numbers (but may include zero values). Can be unsorted. Y - array[N], values to fit. N - number of points. If N is less than length of X/Y, only leading N elements are used. CnstrLeft- optional equality constraint for model value at the left boundary (at X=0). Specify NAN (Not-a-Number) if you do not need constraint on the model value at X=0 (in C++ you can pass alglib::fp_nan as parameter, in C# it will be Double.NaN). See below, section "EQUALITY CONSTRAINTS" for more information about constraints. CnstrRight- optional equality constraint for model value at X=infinity. Specify NAN (Not-a-Number) if you do not need constraint on the model value (in C++ you can pass alglib::fp_nan as parameter, in C# it will be Double.NaN). See below, section "EQUALITY CONSTRAINTS" for more information about constraints. Is4PL - whether 4PL or 5PL models are fitted LambdaV - regularization coefficient, LambdaV>=0. Set it to zero unless you know what you are doing. 
EpsX - stopping condition (step size), EpsX>=0. Zero value means that small step is automatically chosen. See notes below for more information. RsCnt - number of repeated restarts from random points. 4PL/5PL models are prone to problem of bad local extrema. Utilizing multiple random restarts allows us to improve algorithm convergence. RsCnt>=0. Zero value means that function automatically choose small amount of restarts (recommended). OUTPUT PARAMETERS: A, B, C, D- parameters of 4PL model G - parameter of 5PL model; for Is4PL=True, G=1 is returned. Rep - fitting report. This structure has many fields, but ONLY ONES LISTED BELOW ARE SET: * Rep.IterationsCount - number of iterations performed * Rep.RMSError - root-mean-square error * Rep.AvgError - average absolute error * Rep.AvgRelError - average relative error (calculated for non-zero Y-values) * Rep.MaxError - maximum absolute error * Rep.R2 - coefficient of determination, R-squared. This coefficient is calculated as R2=1-RSS/TSS (in case of nonlinear regression there are multiple ways to define R2, each of them giving different results). NOTE: for better stability B parameter is restricted by [+-1/1000,+-1000] range, and G is restricted by [1/10,10] range. It prevents algorithm from making trial steps deep into the area of bad parameters. NOTE: after you obtained coefficients, you can evaluate model with LogisticCalc5() function. NOTE: step is automatically scaled according to scale of parameters being fitted before we compare its length with EpsX. Thus, this function can be used to fit data with very small or very large values without changing EpsX. EQUALITY CONSTRAINTS ON PARAMETERS 4PL/5PL solver supports equality constraints on model values at the left boundary (X=0) and right boundary (X=infinity). These constraints are completely optional and you can specify both of them, only one - or no constraints at all. Parameter CnstrLeft contains left constraint (or NAN for unconstrained fitting), and CnstrRight contains right one. For 4PL, left constraint ALWAYS corresponds to parameter A, and right one is ALWAYS constraint on D. That's because 4PL model is normalized in such way that B>=0. For 5PL model things are different. Unlike 4PL one, 5PL model is NOT symmetric with respect to change in sign of B. Thus, negative B's are possible, and left constraint may constrain parameter A (for positive B's) - or parameter D (for negative B's). Similarly changes meaning of right constraint. You do not have to decide what parameter to constrain - algorithm will automatically determine correct parameters as fitting progresses. However, question highlighted above is important when you interpret fitting results. 
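USAGE SKETCH

The fragment below is only an illustrative sketch of a typical call (the data
values are made up and the variable names are placeholders). It requests an
unconstrained 4PL fit with automatic EpsX and automatic restart count by
passing zeros:

    double[] x = new double[]{0.1, 0.3, 1.0, 3.0, 10.0, 30.0};
    double[] y = new double[]{8.1, 7.6, 5.2, 3.1, 1.4, 1.0};
    double a, b, c, d, g;
    alglib.lsfitreport rep;
    // Is4PL=true, no constraints, no regularization,
    // EpsX=0 and RsCnt=0 mean "choose automatically"
    alglib.logisticfit45x(x, y, 6, Double.NaN, Double.NaN, true,
        0.0, 0.0, 0, out a, out b, out c, out d, out g, out rep);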
-- ALGLIB PROJECT -- Copyright 14.02.2014 by Bochkanov Sergey *************************************************************************/ public static void logisticfit45x(double[] x, double[] y, int n, double cnstrleft, double cnstrright, bool is4pl, double lambdav, double epsx, int rscnt, out double a, out double b, out double c, out double d, out double g, out lsfitreport rep) { a = 0; b = 0; c = 0; d = 0; g = 0; rep = new lsfitreport(); lsfit.logisticfit45x(x, y, n, cnstrleft, cnstrright, is4pl, lambdav, epsx, rscnt, ref a, ref b, ref c, ref d, ref g, rep.innerobj, null); } public static void logisticfit45x(double[] x, double[] y, int n, double cnstrleft, double cnstrright, bool is4pl, double lambdav, double epsx, int rscnt, out double a, out double b, out double c, out double d, out double g, out lsfitreport rep, alglib.xparams _params) { a = 0; b = 0; c = 0; d = 0; g = 0; rep = new lsfitreport(); lsfit.logisticfit45x(x, y, n, cnstrleft, cnstrright, is4pl, lambdav, epsx, rscnt, ref a, ref b, ref c, ref d, ref g, rep.innerobj, _params); } /************************************************************************* Weighted rational least squares fitting using Floater-Hormann rational functions with optimal D chosen from [0,9], with constraints and individual weights. Equidistant grid with M nodes on [min(x),max(x)] is used to build basis functions. Different values of D are tried, optimal D (least WEIGHTED root mean square error) is chosen. Task is linear, so linear least squares solver is used. Complexity of this computational scheme is O(N*M^2) (mostly dominated by the least squares solver). SEE ALSO * BarycentricFitFloaterHormann(), "lightweight" fitting without individual weights and constraints. ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: X - points, array[0..N-1]. Y - function values, array[0..N-1]. W - weights, array[0..N-1] Each summand in square sum of approximation deviations from given values is multiplied by the square of corresponding weight. Fill it by 1's if you don't want to solve weighted task. N - number of points, N>0. XC - points where function values/derivatives are constrained, array[0..K-1]. YC - values of constraints, array[0..K-1] DC - array[0..K-1], types of constraints: * DC[i]=0 means that S(XC[i])=YC[i] * DC[i]=1 means that S'(XC[i])=YC[i] SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS K - number of constraints, 0<=K<M. K=0 means no constraints (XC/YC/DC are not used in such cases). M - number of basis functions ( = number_of_nodes), M>=2. OUTPUT PARAMETERS: Info- same format as in LSFitLinearWC() subroutine. * Info>0 task is solved * Info<=0 an error occurred: -4 means inconvergence of internal SVD -3 means inconsistent constraints -1 means other errors in parameters passed (N<=0, for example) B - barycentric interpolant. Rep - report, same format as in LSFitLinearWC() subroutine. Following fields are set: * DBest best value of the D parameter * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y).
* AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED IMPORTANT: this subroutine doesn't calculate task's condition number for K<>0. SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: Setting constraints can lead to undesired results, like ill-conditioned behavior, or inconsistency being detected. From the other side, it allows us to improve quality of the fit. Here we summarize our experience with constrained barycentric interpolants: * excessive constraints can be inconsistent. Floater-Hormann basis functions aren't as flexible as splines (although they are very smooth). * the more evenly constraints are spread across [min(x),max(x)], the more chances that they will be consistent * the greater is M (given fixed constraints), the more chances that constraints will be consistent * in the general case, consistency of constraints IS NOT GUARANTEED. * in the several special cases, however, we CAN guarantee consistency. * one of this cases is constraints on the function VALUES at the interval boundaries. Note that consustency of the constraints on the function DERIVATIVES is NOT guaranteed (you can use in such cases cubic splines which are more flexible). * another special case is ONE constraint on the function value (OR, but not AND, derivative) anywhere in the interval Our final recommendation is to use constraints WHEN AND ONLY WHEN you can't solve your task without them. Anything beyond special cases given above is not guaranteed and may result in inconsistency. -- ALGLIB PROJECT -- Copyright 18.08.2009 by Bochkanov Sergey *************************************************************************/ public static void barycentricfitfloaterhormannwc(double[] x, double[] y, double[] w, int n, double[] xc, double[] yc, int[] dc, int k, int m, out int info, out barycentricinterpolant b, out barycentricfitreport rep) { info = 0; b = new barycentricinterpolant(); rep = new barycentricfitreport(); lsfit.barycentricfitfloaterhormannwc(x, y, w, n, xc, yc, dc, k, m, ref info, b.innerobj, rep.innerobj, null); } public static void barycentricfitfloaterhormannwc(double[] x, double[] y, double[] w, int n, double[] xc, double[] yc, int[] dc, int k, int m, out int info, out barycentricinterpolant b, out barycentricfitreport rep, alglib.xparams _params) { info = 0; b = new barycentricinterpolant(); rep = new barycentricfitreport(); lsfit.barycentricfitfloaterhormannwc(x, y, w, n, xc, yc, dc, k, m, ref info, b.innerobj, rep.innerobj, _params); } /************************************************************************* Rational least squares fitting using Floater-Hormann rational functions with optimal D chosen from [0,9]. Equidistant grid with M node on [min(x),max(x)] is used to build basis functions. Different values of D are tried, optimal D (least root mean square error) is chosen. Task is linear, so linear least squares solver is used. Complexity of this computational scheme is O(N*M^2) (mostly dominated by the least squares solver). ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! 
related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: X - points, array[0..N-1]. Y - function values, array[0..N-1]. N - number of points, N>0. M - number of basis functions ( = number_of_nodes), M>=2. OUTPUT PARAMETERS: Info- same format as in LSFitLinearWC() subroutine. * Info>0 task is solved * Info<=0 an error occured: -4 means inconvergence of internal SVD -3 means inconsistent constraints B - barycentric interpolant. Rep - report, same format as in LSFitLinearWC() subroutine. Following fields are set: * DBest best value of the D parameter * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED -- ALGLIB PROJECT -- Copyright 18.08.2009 by Bochkanov Sergey *************************************************************************/ public static void barycentricfitfloaterhormann(double[] x, double[] y, int n, int m, out int info, out barycentricinterpolant b, out barycentricfitreport rep) { info = 0; b = new barycentricinterpolant(); rep = new barycentricfitreport(); lsfit.barycentricfitfloaterhormann(x, y, n, m, ref info, b.innerobj, rep.innerobj, null); } public static void barycentricfitfloaterhormann(double[] x, double[] y, int n, int m, out int info, out barycentricinterpolant b, out barycentricfitreport rep, alglib.xparams _params) { info = 0; b = new barycentricinterpolant(); rep = new barycentricfitreport(); lsfit.barycentricfitfloaterhormann(x, y, n, m, ref info, b.innerobj, rep.innerobj, _params); } /************************************************************************* Weighted fitting by cubic spline, with constraints on function values or derivatives. Equidistant grid with M-2 nodes on [min(x,xc),max(x,xc)] is used to build basis functions. Basis functions are cubic splines with continuous second derivatives and non-fixed first derivatives at interval ends. Small regularizing term is used when solving constrained tasks (to improve stability). Task is linear, so linear least squares solver is used. Complexity of this computational scheme is O(N*M^2), mostly dominated by least squares solver SEE ALSO Spline1DFitHermiteWC() - fitting by Hermite splines (more flexible, less smooth) Spline1DFitCubic() - "lightweight" fitting by cubic splines, without invididual weights and constraints ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: X - points, array[0..N-1]. Y - function values, array[0..N-1]. W - weights, array[0..N-1] Each summand in square sum of approximation deviations from given values is multiplied by the square of corresponding weight. Fill it by 1's if you don't want to solve weighted task. N - number of points (optional): * N>0 * if given, only first N elements of X/Y/W are processed * if not given, automatically determined from X/Y/W sizes XC - points where spline values/derivatives are constrained, array[0..K-1]. 
YC - values of constraints, array[0..K-1] DC - array[0..K-1], types of constraints: * DC[i]=0 means that S(XC[i])=YC[i] * DC[i]=1 means that S'(XC[i])=YC[i] SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS K - number of constraints (optional): * 0<=K<M * if given, only first K elements of XC/YC/DC are used * if not given, automatically determined from XC/YC/DC sizes M - number of basis functions ( = number_of_nodes), M>=4. OUTPUT PARAMETERS: Info- same format as in LSFitLinearWC() subroutine. * Info>0 task is solved * Info<=0 an error occurred: -4 means inconvergence of internal SVD -3 means inconsistent constraints S - spline interpolant. Rep - report, same format as in LSFitLinearWC() subroutine. Following fields are set: * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED IMPORTANT: this subroutine doesn't calculate task's condition number for K<>0. ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array. SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: Setting constraints can lead to undesired results, like ill-conditioned behavior, or inconsistency being detected. On the other hand, it allows us to improve quality of the fit. Here we summarize our experience with constrained regression splines: * excessive constraints can be inconsistent. Splines are piecewise cubic functions, and it is easy to create an example where a large number of constraints concentrated in a small area will result in inconsistency, simply because the spline is not flexible enough to satisfy all of them, while the same constraints spread across [min(x),max(x)] will be perfectly consistent. * the more evenly constraints are spread across [min(x),max(x)], the more chances that they will be consistent * the greater is M (given fixed constraints), the more chances that constraints will be consistent * in the general case, consistency of constraints IS NOT GUARANTEED. * in the several special cases, however, we CAN guarantee consistency. * one of these cases is constraints on the function values AND/OR its derivatives at the interval boundaries. * another special case is ONE constraint on the function value (OR, but not AND, derivative) anywhere in the interval Our final recommendation is to use constraints WHEN AND ONLY WHEN you can't solve your task without them. Anything beyond special cases given above is not guaranteed and may result in inconsistency.
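USAGE SKETCH

The fragment below is an illustrative sketch only (data values are made up,
variable names are placeholders). It uses the overload which infers N and K
from the array sizes, constrains the spline value at the left end of the
interval, and evaluates the result with spline1dcalc() (defined elsewhere in
this interface):

    double[] x  = new double[]{0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5};
    double[] y  = new double[]{0.0, 0.4, 0.9, 1.1, 0.9, 0.5, 0.1, -0.3};
    double[] w  = new double[]{1, 1, 1, 1, 1, 1, 1, 1};
    double[] xc = new double[]{0.0};   // constrain S(0)=0
    double[] yc = new double[]{0.0};
    int[]    dc = new int[]{0};        // 0 = constraint on function value
    int info;
    alglib.spline1dinterpolant s;
    alglib.spline1dfitreport rep;
    alglib.spline1dfitcubicwc(x, y, w, xc, yc, dc, 6, out info, out s, out rep);
    double v = alglib.spline1dcalc(s, 1.25);   // evaluate fitted spline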
-- ALGLIB PROJECT -- Copyright 18.08.2009 by Bochkanov Sergey *************************************************************************/ public static void spline1dfitcubicwc(double[] x, double[] y, double[] w, int n, double[] xc, double[] yc, int[] dc, int k, int m, out int info, out spline1dinterpolant s, out spline1dfitreport rep) { info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); lsfit.spline1dfitcubicwc(x, y, w, n, xc, yc, dc, k, m, ref info, s.innerobj, rep.innerobj, null); } public static void spline1dfitcubicwc(double[] x, double[] y, double[] w, int n, double[] xc, double[] yc, int[] dc, int k, int m, out int info, out spline1dinterpolant s, out spline1dfitreport rep, alglib.xparams _params) { info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); lsfit.spline1dfitcubicwc(x, y, w, n, xc, yc, dc, k, m, ref info, s.innerobj, rep.innerobj, _params); } public static void spline1dfitcubicwc(double[] x, double[] y, double[] w, double[] xc, double[] yc, int[] dc, int m, out int info, out spline1dinterpolant s, out spline1dfitreport rep) { int n; int k; if( (ap.len(x)!=ap.len(y)) || (ap.len(x)!=ap.len(w))) throw new alglibexception("Error while calling 'spline1dfitcubicwc': looks like one of arguments has wrong size"); if( (ap.len(xc)!=ap.len(yc)) || (ap.len(xc)!=ap.len(dc))) throw new alglibexception("Error while calling 'spline1dfitcubicwc': looks like one of arguments has wrong size"); info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); n = ap.len(x); k = ap.len(xc); lsfit.spline1dfitcubicwc(x, y, w, n, xc, yc, dc, k, m, ref info, s.innerobj, rep.innerobj, null); return; } public static void spline1dfitcubicwc(double[] x, double[] y, double[] w, double[] xc, double[] yc, int[] dc, int m, out int info, out spline1dinterpolant s, out spline1dfitreport rep, alglib.xparams _params) { int n; int k; if( (ap.len(x)!=ap.len(y)) || (ap.len(x)!=ap.len(w))) throw new alglibexception("Error while calling 'spline1dfitcubicwc': looks like one of arguments has wrong size"); if( (ap.len(xc)!=ap.len(yc)) || (ap.len(xc)!=ap.len(dc))) throw new alglibexception("Error while calling 'spline1dfitcubicwc': looks like one of arguments has wrong size"); info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); n = ap.len(x); k = ap.len(xc); lsfit.spline1dfitcubicwc(x, y, w, n, xc, yc, dc, k, m, ref info, s.innerobj, rep.innerobj, _params); return; } /************************************************************************* Weighted fitting by Hermite spline, with constraints on function values or first derivatives. Equidistant grid with M nodes on [min(x,xc),max(x,xc)] is used to build basis functions. Basis functions are Hermite splines. Small regularizing term is used when solving constrained tasks (to improve stability). Task is linear, so linear least squares solver is used. Complexity of this computational scheme is O(N*M^2), mostly dominated by least squares solver SEE ALSO Spline1DFitCubicWC() - fitting by Cubic splines (less flexible, more smooth) Spline1DFitHermite() - "lightweight" Hermite fitting, without invididual weights and constraints ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! 
We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: X - points, array[0..N-1]. Y - function values, array[0..N-1]. W - weights, array[0..N-1] Each summand in square sum of approximation deviations from given values is multiplied by the square of corresponding weight. Fill it by 1's if you don't want to solve weighted task. N - number of points (optional): * N>0 * if given, only first N elements of X/Y/W are processed * if not given, automatically determined from X/Y/W sizes XC - points where spline values/derivatives are constrained, array[0..K-1]. YC - values of constraints, array[0..K-1] DC - array[0..K-1], types of constraints: * DC[i]=0 means that S(XC[i])=YC[i] * DC[i]=1 means that S'(XC[i])=YC[i] SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS K - number of constraints (optional): * 0<=K<M * if given, only first K elements of XC/YC/DC are used * if not given, automatically determined from XC/YC/DC sizes M - number of basis functions ( = number_of_nodes), M>=4, M IS EVEN! OUTPUT PARAMETERS: Info- same format as in LSFitLinearW() subroutine: * Info>0 task is solved * Info<=0 an error occurred: -4 means inconvergence of internal SVD -3 means inconsistent constraints -2 means odd M was passed (which is not supported) -1 means other errors in parameters passed (N<=0, for example) S - spline interpolant. Rep - report, same format as in LSFitLinearW() subroutine. Following fields are set: * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED IMPORTANT: this subroutine doesn't calculate task's condition number for K<>0. IMPORTANT: this subroutine supports only even M's ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array. SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: Setting constraints can lead to undesired results, like ill-conditioned behavior, or inconsistency being detected. On the other hand, it allows us to improve quality of the fit. Here we summarize our experience with constrained regression splines: * excessive constraints can be inconsistent. Splines are piecewise cubic functions, and it is easy to create an example where a large number of constraints concentrated in a small area will result in inconsistency, simply because the spline is not flexible enough to satisfy all of them, while the same constraints spread across [min(x),max(x)] will be perfectly consistent. * the more evenly constraints are spread across [min(x),max(x)], the more chances that they will be consistent * the greater is M (given fixed constraints), the more chances that constraints will be consistent * in the general case, consistency of constraints is NOT GUARANTEED. * in the several special cases, however, we can guarantee consistency. * one of these cases is M>=4 and constraints on the function value (AND/OR its derivative) at the interval boundaries. * another special case is M>=4 and ONE constraint on the function value (OR, BUT NOT AND, derivative) anywhere in [min(x),max(x)] Our final recommendation is to use constraints WHEN AND ONLY WHEN you can't solve your task without them. Anything beyond special cases given above is not guaranteed and may result in inconsistency.
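USAGE SKETCH

The fragment below is an illustrative sketch only (made-up data, placeholder
variable names). Note that M must be even for Hermite fitting; here the
size-inferring overload is used and the first derivative is constrained at
the right end of the interval:

    double[] x  = new double[]{0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5};
    double[] y  = new double[]{0.0, 0.4, 0.9, 1.1, 0.9, 0.5, 0.1, -0.3};
    double[] w  = new double[]{1, 1, 1, 1, 1, 1, 1, 1};
    double[] xc = new double[]{3.5};   // constrain derivative at right end
    double[] yc = new double[]{-0.8};
    int[]    dc = new int[]{1};        // 1 = constraint on first derivative
    int info;
    alglib.spline1dinterpolant s;
    alglib.spline1dfitreport rep;
    alglib.spline1dfithermitewc(x, y, w, xc, yc, dc, 6, out info, out s, out rep);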
-- ALGLIB PROJECT -- Copyright 18.08.2009 by Bochkanov Sergey *************************************************************************/ public static void spline1dfithermitewc(double[] x, double[] y, double[] w, int n, double[] xc, double[] yc, int[] dc, int k, int m, out int info, out spline1dinterpolant s, out spline1dfitreport rep) { info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); lsfit.spline1dfithermitewc(x, y, w, n, xc, yc, dc, k, m, ref info, s.innerobj, rep.innerobj, null); } public static void spline1dfithermitewc(double[] x, double[] y, double[] w, int n, double[] xc, double[] yc, int[] dc, int k, int m, out int info, out spline1dinterpolant s, out spline1dfitreport rep, alglib.xparams _params) { info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); lsfit.spline1dfithermitewc(x, y, w, n, xc, yc, dc, k, m, ref info, s.innerobj, rep.innerobj, _params); } public static void spline1dfithermitewc(double[] x, double[] y, double[] w, double[] xc, double[] yc, int[] dc, int m, out int info, out spline1dinterpolant s, out spline1dfitreport rep) { int n; int k; if( (ap.len(x)!=ap.len(y)) || (ap.len(x)!=ap.len(w))) throw new alglibexception("Error while calling 'spline1dfithermitewc': looks like one of arguments has wrong size"); if( (ap.len(xc)!=ap.len(yc)) || (ap.len(xc)!=ap.len(dc))) throw new alglibexception("Error while calling 'spline1dfithermitewc': looks like one of arguments has wrong size"); info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); n = ap.len(x); k = ap.len(xc); lsfit.spline1dfithermitewc(x, y, w, n, xc, yc, dc, k, m, ref info, s.innerobj, rep.innerobj, null); return; } public static void spline1dfithermitewc(double[] x, double[] y, double[] w, double[] xc, double[] yc, int[] dc, int m, out int info, out spline1dinterpolant s, out spline1dfitreport rep, alglib.xparams _params) { int n; int k; if( (ap.len(x)!=ap.len(y)) || (ap.len(x)!=ap.len(w))) throw new alglibexception("Error while calling 'spline1dfithermitewc': looks like one of arguments has wrong size"); if( (ap.len(xc)!=ap.len(yc)) || (ap.len(xc)!=ap.len(dc))) throw new alglibexception("Error while calling 'spline1dfithermitewc': looks like one of arguments has wrong size"); info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); n = ap.len(x); k = ap.len(xc); lsfit.spline1dfithermitewc(x, y, w, n, xc, yc, dc, k, m, ref info, s.innerobj, rep.innerobj, _params); return; } /************************************************************************* Least squares fitting by cubic spline. This subroutine is "lightweight" alternative for more complex and feature- rich Spline1DFitCubicWC(). See Spline1DFitCubicWC() for more information about subroutine parameters (we don't duplicate it here because of length) ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. 
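USAGE SKETCH

The fragment below is an illustrative sketch only (made-up data, placeholder
variable names), using the overload which infers N from the array sizes and
evaluating the result with spline1dcalc() (defined elsewhere in this
interface):

    double[] x = new double[]{0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5};
    double[] y = new double[]{0.0, 0.4, 0.9, 1.1, 0.9, 0.5, 0.1, -0.3};
    int info;
    alglib.spline1dinterpolant s;
    alglib.spline1dfitreport rep;
    alglib.spline1dfitcubic(x, y, 5, out info, out s, out rep);   // M=5 basis functions
    double v = alglib.spline1dcalc(s, 1.75);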
-- ALGLIB PROJECT -- Copyright 18.08.2009 by Bochkanov Sergey *************************************************************************/ public static void spline1dfitcubic(double[] x, double[] y, int n, int m, out int info, out spline1dinterpolant s, out spline1dfitreport rep) { info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); lsfit.spline1dfitcubic(x, y, n, m, ref info, s.innerobj, rep.innerobj, null); } public static void spline1dfitcubic(double[] x, double[] y, int n, int m, out int info, out spline1dinterpolant s, out spline1dfitreport rep, alglib.xparams _params) { info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); lsfit.spline1dfitcubic(x, y, n, m, ref info, s.innerobj, rep.innerobj, _params); } public static void spline1dfitcubic(double[] x, double[] y, int m, out int info, out spline1dinterpolant s, out spline1dfitreport rep) { int n; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dfitcubic': looks like one of arguments has wrong size"); info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); n = ap.len(x); lsfit.spline1dfitcubic(x, y, n, m, ref info, s.innerobj, rep.innerobj, null); return; } public static void spline1dfitcubic(double[] x, double[] y, int m, out int info, out spline1dinterpolant s, out spline1dfitreport rep, alglib.xparams _params) { int n; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dfitcubic': looks like one of arguments has wrong size"); info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); n = ap.len(x); lsfit.spline1dfitcubic(x, y, n, m, ref info, s.innerobj, rep.innerobj, _params); return; } /************************************************************************* Least squares fitting by Hermite spline. This subroutine is "lightweight" alternative for more complex and feature- rich Spline1DFitHermiteWC(). See Spline1DFitHermiteWC() description for more information about subroutine parameters (we don't duplicate it here because of length). ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. 
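USAGE SKETCH

The fragment below is an illustrative sketch only (made-up data, placeholder
variable names). Remember that Hermite fitting requires an even M:

    double[] x = new double[]{0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5};
    double[] y = new double[]{0.0, 0.4, 0.9, 1.1, 0.9, 0.5, 0.1, -0.3};
    int info;
    alglib.spline1dinterpolant s;
    alglib.spline1dfitreport rep;
    alglib.spline1dfithermite(x, y, 6, out info, out s, out rep);   // M=6, even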
-- ALGLIB PROJECT -- Copyright 18.08.2009 by Bochkanov Sergey *************************************************************************/ public static void spline1dfithermite(double[] x, double[] y, int n, int m, out int info, out spline1dinterpolant s, out spline1dfitreport rep) { info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); lsfit.spline1dfithermite(x, y, n, m, ref info, s.innerobj, rep.innerobj, null); } public static void spline1dfithermite(double[] x, double[] y, int n, int m, out int info, out spline1dinterpolant s, out spline1dfitreport rep, alglib.xparams _params) { info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); lsfit.spline1dfithermite(x, y, n, m, ref info, s.innerobj, rep.innerobj, _params); } public static void spline1dfithermite(double[] x, double[] y, int m, out int info, out spline1dinterpolant s, out spline1dfitreport rep) { int n; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dfithermite': looks like one of arguments has wrong size"); info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); n = ap.len(x); lsfit.spline1dfithermite(x, y, n, m, ref info, s.innerobj, rep.innerobj, null); return; } public static void spline1dfithermite(double[] x, double[] y, int m, out int info, out spline1dinterpolant s, out spline1dfitreport rep, alglib.xparams _params) { int n; if( (ap.len(x)!=ap.len(y))) throw new alglibexception("Error while calling 'spline1dfithermite': looks like one of arguments has wrong size"); info = 0; s = new spline1dinterpolant(); rep = new spline1dfitreport(); n = ap.len(x); lsfit.spline1dfithermite(x, y, n, m, ref info, s.innerobj, rep.innerobj, _params); return; } /************************************************************************* Weighted linear least squares fitting. QR decomposition is used to reduce task to MxM, then triangular solver or SVD-based solver is used depending on condition number of the system. It allows to maximize speed and retain decent accuracy. IMPORTANT: if you want to perform polynomial fitting, it may be more convenient to use PolynomialFit() function. This function gives best results on polynomial problems and solves numerical stability issues which arise when you fit high-degree polynomials to your data. ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: Y - array[0..N-1] Function values in N points. W - array[0..N-1] Weights corresponding to function values. Each summand in square sum of approximation deviations from given values is multiplied by the square of corresponding weight. FMatrix - a table of basis functions values, array[0..N-1, 0..M-1]. FMatrix[I, J] - value of J-th basis function in I-th point. N - number of points used. N>=1. M - number of basis functions, M>=1. 
OUTPUT PARAMETERS: Info - error code: * -4 internal SVD decomposition subroutine failed (very rare and for degenerate systems only) * -1 incorrect N/M were specified * 1 task is solved C - decomposition coefficients, array[0..M-1] Rep - fitting report. Following fields are set: * Rep.TaskRCond reciprocal of condition number * R2 non-adjusted coefficient of determination (non-weighted) * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED ERRORS IN PARAMETERS This solver also calculates different kinds of errors in parameters and fills corresponding fields of report: * Rep.CovPar covariance matrix for parameters, array[K,K]. * Rep.ErrPar errors in parameters, array[K], errpar = sqrt(diag(CovPar)) * Rep.ErrCurve vector of fit errors - standard deviations of empirical best-fit curve from "ideal" best-fit curve built with infinite number of samples, array[N]. errcurve = sqrt(diag(F*CovPar*F')), where F is functions matrix. * Rep.Noise vector of per-point estimates of noise, array[N] NOTE: noise in the data is estimated as follows: * for fitting without user-supplied weights all points are assumed to have same level of noise, which is estimated from the data * for fitting with user-supplied weights we assume that noise level in I-th point is inversely proportional to Ith weight. Coefficient of proportionality is estimated from the data. NOTE: we apply small amount of regularization when we invert squared Jacobian and calculate covariance matrix. It guarantees that algorithm won't divide by zero during inversion, but skews error estimates a bit (fractional error is about 10^-9). However, we believe that this difference is insignificant for all practical purposes except for the situation when you want to compare ALGLIB results with "reference" implementation up to the last significant digit. NOTE: covariance matrix is estimated using correction for degrees of freedom (covariances are divided by N-M instead of dividing by N). 
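USAGE SKETCH

The fragment below is an illustrative sketch only (made-up data, placeholder
variable names): a weighted fit of y ~ c0*1 + c1*x, i.e. M=2 basis functions
(the constant function and x itself). The last point is treated as less
reliable and gets a smaller weight. The size-inferring overload is used:

    double[] xs = new double[]{0, 1, 2, 3, 4};
    double[] y  = new double[]{0.9, 3.1, 5.2, 6.8, 12.0};
    double[] w  = new double[]{1, 1, 1, 1, 0.2};
    double[,] fmatrix = new double[5, 2];
    for(int i = 0; i < 5; i++)
    {
        fmatrix[i, 0] = 1.0;      // basis function f0(x)=1
        fmatrix[i, 1] = xs[i];    // basis function f1(x)=x
    }
    int info;
    double[] c;
    alglib.lsfitreport rep;
    alglib.lsfitlinearw(y, w, fmatrix, out info, out c, out rep);
    // on success Info>0 and c[0], c[1] hold the fitted coefficients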
-- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitlinearw(double[] y, double[] w, double[,] fmatrix, int n, int m, out int info, out double[] c, out lsfitreport rep) { info = 0; c = new double[0]; rep = new lsfitreport(); lsfit.lsfitlinearw(y, w, fmatrix, n, m, ref info, ref c, rep.innerobj, null); } public static void lsfitlinearw(double[] y, double[] w, double[,] fmatrix, int n, int m, out int info, out double[] c, out lsfitreport rep, alglib.xparams _params) { info = 0; c = new double[0]; rep = new lsfitreport(); lsfit.lsfitlinearw(y, w, fmatrix, n, m, ref info, ref c, rep.innerobj, _params); } public static void lsfitlinearw(double[] y, double[] w, double[,] fmatrix, out int info, out double[] c, out lsfitreport rep) { int n; int m; if( (ap.len(y)!=ap.len(w)) || (ap.len(y)!=ap.rows(fmatrix))) throw new alglibexception("Error while calling 'lsfitlinearw': looks like one of arguments has wrong size"); info = 0; c = new double[0]; rep = new lsfitreport(); n = ap.len(y); m = ap.cols(fmatrix); lsfit.lsfitlinearw(y, w, fmatrix, n, m, ref info, ref c, rep.innerobj, null); return; } public static void lsfitlinearw(double[] y, double[] w, double[,] fmatrix, out int info, out double[] c, out lsfitreport rep, alglib.xparams _params) { int n; int m; if( (ap.len(y)!=ap.len(w)) || (ap.len(y)!=ap.rows(fmatrix))) throw new alglibexception("Error while calling 'lsfitlinearw': looks like one of arguments has wrong size"); info = 0; c = new double[0]; rep = new lsfitreport(); n = ap.len(y); m = ap.cols(fmatrix); lsfit.lsfitlinearw(y, w, fmatrix, n, m, ref info, ref c, rep.innerobj, _params); return; } /************************************************************************* Weighted constrained linear least squares fitting. This is variation of LSFitLinearW(), which searches for min|A*x-b| given that K additional constraints C*x=bc are satisfied. It reduces original task to modified one: min|B*y-d| WITHOUT constraints, then LSFitLinearW() is called. IMPORTANT: if you want to perform polynomial fitting, it may be more convenient to use PolynomialFit() function. This function gives best results on polynomial problems and solves numerical stability issues which arise when you fit high-degree polynomials to your data. ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: Y - array[0..N-1] Function values in N points. W - array[0..N-1] Weights corresponding to function values. Each summand in square sum of approximation deviations from given values is multiplied by the square of corresponding weight. FMatrix - a table of basis functions values, array[0..N-1, 0..M-1]. FMatrix[I,J] - value of J-th basis function in I-th point. CMatrix - a table of constraints, array[0..K-1,0..M]. I-th row of CMatrix corresponds to I-th linear constraint: CMatrix[I,0]*C[0] + ... + CMatrix[I,M-1]*C[M-1] = CMatrix[I,M] N - number of points used. N>=1. M - number of basis functions, M>=1.
K - number of constraints, 0 <= K < M K=0 corresponds to absence of constraints. OUTPUT PARAMETERS: Info - error code: * -4 internal SVD decomposition subroutine failed (very rare and for degenerate systems only) * -3 either too many constraints (M or more), degenerate constraints (some constraints are repeated twice) or inconsistent constraints were specified. * 1 task is solved C - decomposition coefficients, array[0..M-1] Rep - fitting report. Following fields are set: * R2 non-adjusted coefficient of determination (non-weighted) * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED IMPORTANT: this subroutine doesn't calculate task's condition number for K<>0. ERRORS IN PARAMETERS This solver also calculates different kinds of errors in parameters and fills corresponding fields of report: * Rep.CovPar covariance matrix for parameters, array[K,K]. * Rep.ErrPar errors in parameters, array[K], errpar = sqrt(diag(CovPar)) * Rep.ErrCurve vector of fit errors - standard deviations of empirical best-fit curve from "ideal" best-fit curve built with infinite number of samples, array[N]. errcurve = sqrt(diag(F*CovPar*F')), where F is functions matrix. * Rep.Noise vector of per-point estimates of noise, array[N] IMPORTANT: errors in parameters are calculated without taking into account boundary/linear constraints! Presence of constraints changes distribution of errors, but there is no easy way to account for constraints when you calculate covariance matrix. NOTE: noise in the data is estimated as follows: * for fitting without user-supplied weights all points are assumed to have same level of noise, which is estimated from the data * for fitting with user-supplied weights we assume that noise level in I-th point is inversely proportional to I-th weight. Coefficient of proportionality is estimated from the data. NOTE: we apply small amount of regularization when we invert squared Jacobian and calculate covariance matrix. It guarantees that algorithm won't divide by zero during inversion, but skews error estimates a bit (fractional error is about 10^-9). However, we believe that this difference is insignificant for all practical purposes except for the situation when you want to compare ALGLIB results with "reference" implementation up to the last significant digit. NOTE: covariance matrix is estimated using correction for degrees of freedom (covariances are divided by N-M instead of dividing by N).
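USAGE SKETCH

The fragment below is an illustrative sketch only (made-up data, placeholder
variable names): a weighted fit of y ~ c0*1 + c1*x with one equality
constraint forcing the fitted line through the point (1, 3). The
size-inferring overload is used:

    double[] xs = new double[]{0, 1, 2, 3, 4};
    double[] y  = new double[]{0.9, 3.1, 5.2, 6.8, 9.1};
    double[] w  = new double[]{1, 1, 1, 1, 1};
    double[,] fmatrix = new double[5, 2];
    for(int i = 0; i < 5; i++)
    {
        fmatrix[i, 0] = 1.0;      // f0(x)=1
        fmatrix[i, 1] = xs[i];    // f1(x)=x
    }
    // one constraint row: 1*c[0] + 1*c[1] = 3
    double[,] cmatrix = new double[,]{{1, 1, 3}};
    int info;
    double[] c;
    alglib.lsfitreport rep;
    alglib.lsfitlinearwc(y, w, fmatrix, cmatrix, out info, out c, out rep);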
-- ALGLIB -- Copyright 07.09.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitlinearwc(double[] y, double[] w, double[,] fmatrix, double[,] cmatrix, int n, int m, int k, out int info, out double[] c, out lsfitreport rep) { info = 0; c = new double[0]; rep = new lsfitreport(); lsfit.lsfitlinearwc(y, w, fmatrix, cmatrix, n, m, k, ref info, ref c, rep.innerobj, null); } public static void lsfitlinearwc(double[] y, double[] w, double[,] fmatrix, double[,] cmatrix, int n, int m, int k, out int info, out double[] c, out lsfitreport rep, alglib.xparams _params) { info = 0; c = new double[0]; rep = new lsfitreport(); lsfit.lsfitlinearwc(y, w, fmatrix, cmatrix, n, m, k, ref info, ref c, rep.innerobj, _params); } public static void lsfitlinearwc(double[] y, double[] w, double[,] fmatrix, double[,] cmatrix, out int info, out double[] c, out lsfitreport rep) { int n; int m; int k; if( (ap.len(y)!=ap.len(w)) || (ap.len(y)!=ap.rows(fmatrix))) throw new alglibexception("Error while calling 'lsfitlinearwc': looks like one of arguments has wrong size"); if( (ap.cols(fmatrix)!=ap.cols(cmatrix)-1)) throw new alglibexception("Error while calling 'lsfitlinearwc': looks like one of arguments has wrong size"); info = 0; c = new double[0]; rep = new lsfitreport(); n = ap.len(y); m = ap.cols(fmatrix); k = ap.rows(cmatrix); lsfit.lsfitlinearwc(y, w, fmatrix, cmatrix, n, m, k, ref info, ref c, rep.innerobj, null); return; } public static void lsfitlinearwc(double[] y, double[] w, double[,] fmatrix, double[,] cmatrix, out int info, out double[] c, out lsfitreport rep, alglib.xparams _params) { int n; int m; int k; if( (ap.len(y)!=ap.len(w)) || (ap.len(y)!=ap.rows(fmatrix))) throw new alglibexception("Error while calling 'lsfitlinearwc': looks like one of arguments has wrong size"); if( (ap.cols(fmatrix)!=ap.cols(cmatrix)-1)) throw new alglibexception("Error while calling 'lsfitlinearwc': looks like one of arguments has wrong size"); info = 0; c = new double[0]; rep = new lsfitreport(); n = ap.len(y); m = ap.cols(fmatrix); k = ap.rows(cmatrix); lsfit.lsfitlinearwc(y, w, fmatrix, cmatrix, n, m, k, ref info, ref c, rep.innerobj, _params); return; } /************************************************************************* Linear least squares fitting. QR decomposition is used to reduce task to MxM, then triangular solver or SVD-based solver is used depending on condition number of the system. It allows to maximize speed and retain decent accuracy. IMPORTANT: if you want to perform polynomial fitting, it may be more convenient to use PolynomialFit() function. This function gives best results on polynomial problems and solves numerical stability issues which arise when you fit high-degree polynomials to your data. ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: Y - array[0..N-1] Function values in N points. FMatrix - a table of basis functions values, array[0..N-1, 0..M-1]. 
FMatrix[I, J] - value of J-th basis function in I-th point. N - number of points used. N>=1. M - number of basis functions, M>=1. OUTPUT PARAMETERS: Info - error code: * -4 internal SVD decomposition subroutine failed (very rare and for degenerate systems only) * 1 task is solved C - decomposition coefficients, array[0..M-1] Rep - fitting report. Following fields are set: * Rep.TaskRCond reciprocal of condition number * R2 non-adjusted coefficient of determination (non-weighted) * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED ERRORS IN PARAMETERS This solver also calculates different kinds of errors in parameters and fills corresponding fields of report: * Rep.CovPar covariance matrix for parameters, array[K,K]. * Rep.ErrPar errors in parameters, array[K], errpar = sqrt(diag(CovPar)) * Rep.ErrCurve vector of fit errors - standard deviations of empirical best-fit curve from "ideal" best-fit curve built with infinite number of samples, array[N]. errcurve = sqrt(diag(F*CovPar*F')), where F is functions matrix. * Rep.Noise vector of per-point estimates of noise, array[N] NOTE: noise in the data is estimated as follows: * for fitting without user-supplied weights all points are assumed to have same level of noise, which is estimated from the data * for fitting with user-supplied weights we assume that noise level in I-th point is inversely proportional to Ith weight. Coefficient of proportionality is estimated from the data. NOTE: we apply small amount of regularization when we invert squared Jacobian and calculate covariance matrix. It guarantees that algorithm won't divide by zero during inversion, but skews error estimates a bit (fractional error is about 10^-9). However, we believe that this difference is insignificant for all practical purposes except for the situation when you want to compare ALGLIB results with "reference" implementation up to the last significant digit. NOTE: covariance matrix is estimated using correction for degrees of freedom (covariances are divided by N-M instead of dividing by N). 
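USAGE SKETCH

The fragment below is an illustrative sketch only (made-up data, placeholder
variable names): an unweighted fit of y ~ c0*1 + c1*x using the overload
which infers N and M from the array sizes:

    double[] xs = new double[]{0, 1, 2, 3, 4};
    double[] y  = new double[]{1.1, 2.9, 5.1, 7.2, 8.8};
    double[,] fmatrix = new double[5, 2];
    for(int i = 0; i < 5; i++)
    {
        fmatrix[i, 0] = 1.0;      // f0(x)=1
        fmatrix[i, 1] = xs[i];    // f1(x)=x
    }
    int info;
    double[] c;
    alglib.lsfitreport rep;
    alglib.lsfitlinear(y, fmatrix, out info, out c, out rep);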
-- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitlinear(double[] y, double[,] fmatrix, int n, int m, out int info, out double[] c, out lsfitreport rep) { info = 0; c = new double[0]; rep = new lsfitreport(); lsfit.lsfitlinear(y, fmatrix, n, m, ref info, ref c, rep.innerobj, null); } public static void lsfitlinear(double[] y, double[,] fmatrix, int n, int m, out int info, out double[] c, out lsfitreport rep, alglib.xparams _params) { info = 0; c = new double[0]; rep = new lsfitreport(); lsfit.lsfitlinear(y, fmatrix, n, m, ref info, ref c, rep.innerobj, _params); } public static void lsfitlinear(double[] y, double[,] fmatrix, out int info, out double[] c, out lsfitreport rep) { int n; int m; if( (ap.len(y)!=ap.rows(fmatrix))) throw new alglibexception("Error while calling 'lsfitlinear': looks like one of arguments has wrong size"); info = 0; c = new double[0]; rep = new lsfitreport(); n = ap.len(y); m = ap.cols(fmatrix); lsfit.lsfitlinear(y, fmatrix, n, m, ref info, ref c, rep.innerobj, null); return; } public static void lsfitlinear(double[] y, double[,] fmatrix, out int info, out double[] c, out lsfitreport rep, alglib.xparams _params) { int n; int m; if( (ap.len(y)!=ap.rows(fmatrix))) throw new alglibexception("Error while calling 'lsfitlinear': looks like one of arguments has wrong size"); info = 0; c = new double[0]; rep = new lsfitreport(); n = ap.len(y); m = ap.cols(fmatrix); lsfit.lsfitlinear(y, fmatrix, n, m, ref info, ref c, rep.innerobj, _params); return; } /************************************************************************* Constrained linear least squares fitting. This is variation of LSFitLinear(), which searches for min|A*x-b| given that K additional constraints C*x=bc are satisfied. It reduces original task to modified one: min|B*y-d| WITHOUT constraints, then LSFitLinear() is called. IMPORTANT: if you want to perform polynomial fitting, it may be more convenient to use PolynomialFit() function. This function gives best results on polynomial problems and solves numerical stability issues which arise when you fit high-degree polynomials to your data. ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: Y - array[0..N-1] Function values in N points. FMatrix - a table of basis functions values, array[0..N-1, 0..M-1]. FMatrix[I,J] - value of J-th basis function in I-th point. CMatrix - a table of constraints, array[0..K-1,0..M]. I-th row of CMatrix corresponds to I-th linear constraint: CMatrix[I,0]*C[0] + ... + CMatrix[I,M-1]*C[M-1] = CMatrix[I,M] N - number of points used. N>=1. M - number of basis functions, M>=1. K - number of constraints, 0 <= K < M K=0 corresponds to absence of constraints.
OUTPUT PARAMETERS: Info - error code: * -4 internal SVD decomposition subroutine failed (very rare and for degenerate systems only) * -3 either too many constraints (M or more), degenerate constraints (some constraints are repeated twice) or inconsistent constraints were specified. * 1 task is solved C - decomposition coefficients, array[0..M-1] Rep - fitting report. Following fields are set: * R2 non-adjusted coefficient of determination (non-weighted) * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED IMPORTANT: this subroutine doesn't calculate task's condition number for K<>0. ERRORS IN PARAMETERS This solver also calculates different kinds of errors in parameters and fills corresponding fields of report: * Rep.CovPar covariance matrix for parameters, array[K,K]. * Rep.ErrPar errors in parameters, array[K], errpar = sqrt(diag(CovPar)) * Rep.ErrCurve vector of fit errors - standard deviations of empirical best-fit curve from "ideal" best-fit curve built with infinite number of samples, array[N]. errcurve = sqrt(diag(F*CovPar*F')), where F is functions matrix. * Rep.Noise vector of per-point estimates of noise, array[N] IMPORTANT: errors in parameters are calculated without taking into account boundary/linear constraints! Presence of constraints changes distribution of errors, but there is no easy way to account for constraints when you calculate covariance matrix. NOTE: noise in the data is estimated as follows: * for fitting without user-supplied weights all points are assumed to have same level of noise, which is estimated from the data * for fitting with user-supplied weights we assume that noise level in I-th point is inversely proportional to I-th weight. Coefficient of proportionality is estimated from the data. NOTE: we apply small amount of regularization when we invert squared Jacobian and calculate covariance matrix. It guarantees that algorithm won't divide by zero during inversion, but skews error estimates a bit (fractional error is about 10^-9). However, we believe that this difference is insignificant for all practical purposes except for the situation when you want to compare ALGLIB results with "reference" implementation up to the last significant digit. NOTE: covariance matrix is estimated using correction for degrees of freedom (covariances are divided by N-M instead of dividing by N).
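USAGE SKETCH

The fragment below is an illustrative sketch only (made-up data, placeholder
variable names): an unweighted fit of y ~ c0*1 + c1*x with one equality
constraint forcing the fit through the origin. The size-inferring overload
is used:

    double[] xs = new double[]{0, 1, 2, 3, 4};
    double[] y  = new double[]{0.1, 2.1, 3.9, 6.1, 7.9};
    double[,] fmatrix = new double[5, 2];
    for(int i = 0; i < 5; i++)
    {
        fmatrix[i, 0] = 1.0;      // f0(x)=1
        fmatrix[i, 1] = xs[i];    // f1(x)=x
    }
    // one constraint row: 1*c[0] + 0*c[1] = 0
    double[,] cmatrix = new double[,]{{1, 0, 0}};
    int info;
    double[] c;
    alglib.lsfitreport rep;
    alglib.lsfitlinearc(y, fmatrix, cmatrix, out info, out c, out rep);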
-- ALGLIB -- Copyright 07.09.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitlinearc(double[] y, double[,] fmatrix, double[,] cmatrix, int n, int m, int k, out int info, out double[] c, out lsfitreport rep) { info = 0; c = new double[0]; rep = new lsfitreport(); lsfit.lsfitlinearc(y, fmatrix, cmatrix, n, m, k, ref info, ref c, rep.innerobj, null); } public static void lsfitlinearc(double[] y, double[,] fmatrix, double[,] cmatrix, int n, int m, int k, out int info, out double[] c, out lsfitreport rep, alglib.xparams _params) { info = 0; c = new double[0]; rep = new lsfitreport(); lsfit.lsfitlinearc(y, fmatrix, cmatrix, n, m, k, ref info, ref c, rep.innerobj, _params); } public static void lsfitlinearc(double[] y, double[,] fmatrix, double[,] cmatrix, out int info, out double[] c, out lsfitreport rep) { int n; int m; int k; if( (ap.len(y)!=ap.rows(fmatrix))) throw new alglibexception("Error while calling 'lsfitlinearc': looks like one of arguments has wrong size"); if( (ap.cols(fmatrix)!=ap.cols(cmatrix)-1)) throw new alglibexception("Error while calling 'lsfitlinearc': looks like one of arguments has wrong size"); info = 0; c = new double[0]; rep = new lsfitreport(); n = ap.len(y); m = ap.cols(fmatrix); k = ap.rows(cmatrix); lsfit.lsfitlinearc(y, fmatrix, cmatrix, n, m, k, ref info, ref c, rep.innerobj, null); return; } public static void lsfitlinearc(double[] y, double[,] fmatrix, double[,] cmatrix, out int info, out double[] c, out lsfitreport rep, alglib.xparams _params) { int n; int m; int k; if( (ap.len(y)!=ap.rows(fmatrix))) throw new alglibexception("Error while calling 'lsfitlinearc': looks like one of arguments has wrong size"); if( (ap.cols(fmatrix)!=ap.cols(cmatrix)-1)) throw new alglibexception("Error while calling 'lsfitlinearc': looks like one of arguments has wrong size"); info = 0; c = new double[0]; rep = new lsfitreport(); n = ap.len(y); m = ap.cols(fmatrix); k = ap.rows(cmatrix); lsfit.lsfitlinearc(y, fmatrix, cmatrix, n, m, k, ref info, ref c, rep.innerobj, _params); return; } /************************************************************************* Weighted nonlinear least squares fitting using function values only. Combination of numerical differentiation and secant updates is used to obtain function Jacobian. Nonlinear task min(F(c)) is solved, where F(c) = (w[0]*(f(c,x[0])-y[0]))^2 + ... + (w[n-1]*(f(c,x[n-1])-y[n-1]))^2, * N is a number of points, * M is a dimension of a space points belong to, * K is a dimension of a space of parameters being fitted, * w is an N-dimensional vector of weight coefficients, * x is a set of N points, each of them is an M-dimensional vector, * c is a K-dimensional vector of parameters being fitted This subroutine uses only f(c,x[i]). INPUT PARAMETERS: X - array[0..N-1,0..M-1], points (one row = one point) Y - array[0..N-1], function values. 
W - weights, array[0..N-1] C - array[0..K-1], initial approximation to the solution, N - number of points, N>1 M - dimension of space K - number of parameters being fitted DiffStep- numerical differentiation step; should not be very small or large; large = loss of accuracy small = growth of round-off errors OUTPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 18.10.2008 by Bochkanov Sergey *************************************************************************/ public static void lsfitcreatewf(double[,] x, double[] y, double[] w, double[] c, int n, int m, int k, double diffstep, out lsfitstate state) { state = new lsfitstate(); lsfit.lsfitcreatewf(x, y, w, c, n, m, k, diffstep, state.innerobj, null); } public static void lsfitcreatewf(double[,] x, double[] y, double[] w, double[] c, int n, int m, int k, double diffstep, out lsfitstate state, alglib.xparams _params) { state = new lsfitstate(); lsfit.lsfitcreatewf(x, y, w, c, n, m, k, diffstep, state.innerobj, _params); } public static void lsfitcreatewf(double[,] x, double[] y, double[] w, double[] c, double diffstep, out lsfitstate state) { int n; int m; int k; if( (ap.rows(x)!=ap.len(y)) || (ap.rows(x)!=ap.len(w))) throw new alglibexception("Error while calling 'lsfitcreatewf': looks like one of arguments has wrong size"); state = new lsfitstate(); n = ap.rows(x); m = ap.cols(x); k = ap.len(c); lsfit.lsfitcreatewf(x, y, w, c, n, m, k, diffstep, state.innerobj, null); return; } public static void lsfitcreatewf(double[,] x, double[] y, double[] w, double[] c, double diffstep, out lsfitstate state, alglib.xparams _params) { int n; int m; int k; if( (ap.rows(x)!=ap.len(y)) || (ap.rows(x)!=ap.len(w))) throw new alglibexception("Error while calling 'lsfitcreatewf': looks like one of arguments has wrong size"); state = new lsfitstate(); n = ap.rows(x); m = ap.cols(x); k = ap.len(c); lsfit.lsfitcreatewf(x, y, w, c, n, m, k, diffstep, state.innerobj, _params); return; } /************************************************************************* Nonlinear least squares fitting using function values only. Combination of numerical differentiation and secant updates is used to obtain function Jacobian. Nonlinear task min(F(c)) is solved, where F(c) = (f(c,x[0])-y[0])^2 + ... + (f(c,x[n-1])-y[n-1])^2, * N is a number of points, * M is a dimension of a space points belong to, * K is a dimension of a space of parameters being fitted, * w is an N-dimensional vector of weight coefficients, * x is a set of N points, each of them is an M-dimensional vector, * c is a K-dimensional vector of parameters being fitted This subroutine uses only f(c,x[i]). INPUT PARAMETERS: X - array[0..N-1,0..M-1], points (one row = one point) Y - array[0..N-1], function values. 
C - array[0..K-1], initial approximation to the solution, N - number of points, N>1 M - dimension of space K - number of parameters being fitted DiffStep- numerical differentiation step; should not be very small or large; large = loss of accuracy small = growth of round-off errors OUTPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 18.10.2008 by Bochkanov Sergey *************************************************************************/ public static void lsfitcreatef(double[,] x, double[] y, double[] c, int n, int m, int k, double diffstep, out lsfitstate state) { state = new lsfitstate(); lsfit.lsfitcreatef(x, y, c, n, m, k, diffstep, state.innerobj, null); } public static void lsfitcreatef(double[,] x, double[] y, double[] c, int n, int m, int k, double diffstep, out lsfitstate state, alglib.xparams _params) { state = new lsfitstate(); lsfit.lsfitcreatef(x, y, c, n, m, k, diffstep, state.innerobj, _params); } public static void lsfitcreatef(double[,] x, double[] y, double[] c, double diffstep, out lsfitstate state) { int n; int m; int k; if( (ap.rows(x)!=ap.len(y))) throw new alglibexception("Error while calling 'lsfitcreatef': looks like one of arguments has wrong size"); state = new lsfitstate(); n = ap.rows(x); m = ap.cols(x); k = ap.len(c); lsfit.lsfitcreatef(x, y, c, n, m, k, diffstep, state.innerobj, null); return; } public static void lsfitcreatef(double[,] x, double[] y, double[] c, double diffstep, out lsfitstate state, alglib.xparams _params) { int n; int m; int k; if( (ap.rows(x)!=ap.len(y))) throw new alglibexception("Error while calling 'lsfitcreatef': looks like one of arguments has wrong size"); state = new lsfitstate(); n = ap.rows(x); m = ap.cols(x); k = ap.len(c); lsfit.lsfitcreatef(x, y, c, n, m, k, diffstep, state.innerobj, _params); return; } /************************************************************************* Weighted nonlinear least squares fitting using gradient only. Nonlinear task min(F(c)) is solved, where F(c) = (w[0]*(f(c,x[0])-y[0]))^2 + ... + (w[n-1]*(f(c,x[n-1])-y[n-1]))^2, * N is a number of points, * M is a dimension of a space points belong to, * K is a dimension of a space of parameters being fitted, * w is an N-dimensional vector of weight coefficients, * x is a set of N points, each of them is an M-dimensional vector, * c is a K-dimensional vector of parameters being fitted This subroutine uses only f(c,x[i]) and its gradient. INPUT PARAMETERS: X - array[0..N-1,0..M-1], points (one row = one point) Y - array[0..N-1], function values. W - weights, array[0..N-1] C - array[0..K-1], initial approximation to the solution, N - number of points, N>1 M - dimension of space K - number of parameters being fitted CheapFG - boolean flag, which is: * True if both function and gradient calculation complexity are less than O(M^2). An improved algorithm can be used which corresponds to FGJ scheme from MINLM unit. * False otherwise. Standard Jacibian-bases Levenberg-Marquardt algo will be used (FJ scheme). 
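EXAMPLE (informal sketch; the callback names, data values and variable names are illustrative, with delegate signatures following the LSFitFit() description given later in this unit): weighted gradient-based fit of f(c,x)=c0*exp(-c1*x):

    static void expfunc(double[] c, double[] x, ref double func, object obj)
    {
        func = c[0]*System.Math.Exp(-c[1]*x[0]);
    }
    static void expgrad(double[] c, double[] x, ref double func, double[] grad, object obj)
    {
        func    = c[0]*System.Math.Exp(-c[1]*x[0]);
        grad[0] = System.Math.Exp(-c[1]*x[0]);              // df/dc0
        grad[1] = -c[0]*x[0]*System.Math.Exp(-c[1]*x[0]);   // df/dc1
    }
    // ... and, in the fitting routine:
    double[,] x = new double[,] { {0.0}, {1.0}, {2.0}, {3.0} };  // N=4 points, M=1
    double[]  y = new double[]  { 2.0, 1.2, 0.8, 0.5 };
    double[]  w = new double[]  { 1.0, 1.0, 1.0, 1.0 };
    double[]  c = new double[]  { 1.0, 1.0 };                    // initial guess, K=2
    alglib.lsfitstate state;
    alglib.lsfitcreatewfg(x, y, w, c, true, out state);
    // the state is then driven by lsfitfit(state, expfunc, expgrad, null, null);
    // a complete fitting loop is sketched after the LSFitResults() description below.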
OUTPUT PARAMETERS: State - structure which stores algorithm state See also: LSFitResults LSFitCreateFG (fitting without weights) LSFitCreateWFGH (fitting using Hessian) LSFitCreateFGH (fitting using Hessian, without weights) -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitcreatewfg(double[,] x, double[] y, double[] w, double[] c, int n, int m, int k, bool cheapfg, out lsfitstate state) { state = new lsfitstate(); lsfit.lsfitcreatewfg(x, y, w, c, n, m, k, cheapfg, state.innerobj, null); } public static void lsfitcreatewfg(double[,] x, double[] y, double[] w, double[] c, int n, int m, int k, bool cheapfg, out lsfitstate state, alglib.xparams _params) { state = new lsfitstate(); lsfit.lsfitcreatewfg(x, y, w, c, n, m, k, cheapfg, state.innerobj, _params); } public static void lsfitcreatewfg(double[,] x, double[] y, double[] w, double[] c, bool cheapfg, out lsfitstate state) { int n; int m; int k; if( (ap.rows(x)!=ap.len(y)) || (ap.rows(x)!=ap.len(w))) throw new alglibexception("Error while calling 'lsfitcreatewfg': looks like one of arguments has wrong size"); state = new lsfitstate(); n = ap.rows(x); m = ap.cols(x); k = ap.len(c); lsfit.lsfitcreatewfg(x, y, w, c, n, m, k, cheapfg, state.innerobj, null); return; } public static void lsfitcreatewfg(double[,] x, double[] y, double[] w, double[] c, bool cheapfg, out lsfitstate state, alglib.xparams _params) { int n; int m; int k; if( (ap.rows(x)!=ap.len(y)) || (ap.rows(x)!=ap.len(w))) throw new alglibexception("Error while calling 'lsfitcreatewfg': looks like one of arguments has wrong size"); state = new lsfitstate(); n = ap.rows(x); m = ap.cols(x); k = ap.len(c); lsfit.lsfitcreatewfg(x, y, w, c, n, m, k, cheapfg, state.innerobj, _params); return; } /************************************************************************* Nonlinear least squares fitting using gradient only, without individual weights. Nonlinear task min(F(c)) is solved, where F(c) = ((f(c,x[0])-y[0]))^2 + ... + ((f(c,x[n-1])-y[n-1]))^2, * N is a number of points, * M is a dimension of a space points belong to, * K is a dimension of a space of parameters being fitted, * x is a set of N points, each of them is an M-dimensional vector, * c is a K-dimensional vector of parameters being fitted This subroutine uses only f(c,x[i]) and its gradient. INPUT PARAMETERS: X - array[0..N-1,0..M-1], points (one row = one point) Y - array[0..N-1], function values. C - array[0..K-1], initial approximation to the solution, N - number of points, N>1 M - dimension of space K - number of parameters being fitted CheapFG - boolean flag, which is: * True if both function and gradient calculation complexity are less than O(M^2). An improved algorithm can be used which corresponds to FGJ scheme from MINLM unit. * False otherwise. Standard Jacibian-bases Levenberg-Marquardt algo will be used (FJ scheme). 
OUTPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitcreatefg(double[,] x, double[] y, double[] c, int n, int m, int k, bool cheapfg, out lsfitstate state) { state = new lsfitstate(); lsfit.lsfitcreatefg(x, y, c, n, m, k, cheapfg, state.innerobj, null); } public static void lsfitcreatefg(double[,] x, double[] y, double[] c, int n, int m, int k, bool cheapfg, out lsfitstate state, alglib.xparams _params) { state = new lsfitstate(); lsfit.lsfitcreatefg(x, y, c, n, m, k, cheapfg, state.innerobj, _params); } public static void lsfitcreatefg(double[,] x, double[] y, double[] c, bool cheapfg, out lsfitstate state) { int n; int m; int k; if( (ap.rows(x)!=ap.len(y))) throw new alglibexception("Error while calling 'lsfitcreatefg': looks like one of arguments has wrong size"); state = new lsfitstate(); n = ap.rows(x); m = ap.cols(x); k = ap.len(c); lsfit.lsfitcreatefg(x, y, c, n, m, k, cheapfg, state.innerobj, null); return; } public static void lsfitcreatefg(double[,] x, double[] y, double[] c, bool cheapfg, out lsfitstate state, alglib.xparams _params) { int n; int m; int k; if( (ap.rows(x)!=ap.len(y))) throw new alglibexception("Error while calling 'lsfitcreatefg': looks like one of arguments has wrong size"); state = new lsfitstate(); n = ap.rows(x); m = ap.cols(x); k = ap.len(c); lsfit.lsfitcreatefg(x, y, c, n, m, k, cheapfg, state.innerobj, _params); return; } /************************************************************************* Weighted nonlinear least squares fitting using gradient/Hessian. Nonlinear task min(F(c)) is solved, where F(c) = (w[0]*(f(c,x[0])-y[0]))^2 + ... + (w[n-1]*(f(c,x[n-1])-y[n-1]))^2, * N is a number of points, * M is a dimension of a space points belong to, * K is a dimension of a space of parameters being fitted, * w is an N-dimensional vector of weight coefficients, * x is a set of N points, each of them is an M-dimensional vector, * c is a K-dimensional vector of parameters being fitted This subroutine uses f(c,x[i]), its gradient and its Hessian. INPUT PARAMETERS: X - array[0..N-1,0..M-1], points (one row = one point) Y - array[0..N-1], function values. 
W - weights, array[0..N-1] C - array[0..K-1], initial approximation to the solution, N - number of points, N>1 M - dimension of space K - number of parameters being fitted OUTPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitcreatewfgh(double[,] x, double[] y, double[] w, double[] c, int n, int m, int k, out lsfitstate state) { state = new lsfitstate(); lsfit.lsfitcreatewfgh(x, y, w, c, n, m, k, state.innerobj, null); } public static void lsfitcreatewfgh(double[,] x, double[] y, double[] w, double[] c, int n, int m, int k, out lsfitstate state, alglib.xparams _params) { state = new lsfitstate(); lsfit.lsfitcreatewfgh(x, y, w, c, n, m, k, state.innerobj, _params); } public static void lsfitcreatewfgh(double[,] x, double[] y, double[] w, double[] c, out lsfitstate state) { int n; int m; int k; if( (ap.rows(x)!=ap.len(y)) || (ap.rows(x)!=ap.len(w))) throw new alglibexception("Error while calling 'lsfitcreatewfgh': looks like one of arguments has wrong size"); state = new lsfitstate(); n = ap.rows(x); m = ap.cols(x); k = ap.len(c); lsfit.lsfitcreatewfgh(x, y, w, c, n, m, k, state.innerobj, null); return; } public static void lsfitcreatewfgh(double[,] x, double[] y, double[] w, double[] c, out lsfitstate state, alglib.xparams _params) { int n; int m; int k; if( (ap.rows(x)!=ap.len(y)) || (ap.rows(x)!=ap.len(w))) throw new alglibexception("Error while calling 'lsfitcreatewfgh': looks like one of arguments has wrong size"); state = new lsfitstate(); n = ap.rows(x); m = ap.cols(x); k = ap.len(c); lsfit.lsfitcreatewfgh(x, y, w, c, n, m, k, state.innerobj, _params); return; } /************************************************************************* Nonlinear least squares fitting using gradient/Hessian, without individial weights. Nonlinear task min(F(c)) is solved, where F(c) = ((f(c,x[0])-y[0]))^2 + ... + ((f(c,x[n-1])-y[n-1]))^2, * N is a number of points, * M is a dimension of a space points belong to, * K is a dimension of a space of parameters being fitted, * x is a set of N points, each of them is an M-dimensional vector, * c is a K-dimensional vector of parameters being fitted This subroutine uses f(c,x[i]), its gradient and its Hessian. INPUT PARAMETERS: X - array[0..N-1,0..M-1], points (one row = one point) Y - array[0..N-1], function values. 
C - array[0..K-1], initial approximation to the solution, N - number of points, N>1 M - dimension of space K - number of parameters being fitted OUTPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitcreatefgh(double[,] x, double[] y, double[] c, int n, int m, int k, out lsfitstate state) { state = new lsfitstate(); lsfit.lsfitcreatefgh(x, y, c, n, m, k, state.innerobj, null); } public static void lsfitcreatefgh(double[,] x, double[] y, double[] c, int n, int m, int k, out lsfitstate state, alglib.xparams _params) { state = new lsfitstate(); lsfit.lsfitcreatefgh(x, y, c, n, m, k, state.innerobj, _params); } public static void lsfitcreatefgh(double[,] x, double[] y, double[] c, out lsfitstate state) { int n; int m; int k; if( (ap.rows(x)!=ap.len(y))) throw new alglibexception("Error while calling 'lsfitcreatefgh': looks like one of arguments has wrong size"); state = new lsfitstate(); n = ap.rows(x); m = ap.cols(x); k = ap.len(c); lsfit.lsfitcreatefgh(x, y, c, n, m, k, state.innerobj, null); return; } public static void lsfitcreatefgh(double[,] x, double[] y, double[] c, out lsfitstate state, alglib.xparams _params) { int n; int m; int k; if( (ap.rows(x)!=ap.len(y))) throw new alglibexception("Error while calling 'lsfitcreatefgh': looks like one of arguments has wrong size"); state = new lsfitstate(); n = ap.rows(x); m = ap.cols(x); k = ap.len(c); lsfit.lsfitcreatefgh(x, y, c, n, m, k, state.innerobj, _params); return; } /************************************************************************* Stopping conditions for nonlinear least squares fitting. INPUT PARAMETERS: State - structure which stores algorithm state EpsX - >=0 The subroutine finishes its work if on k+1-th iteration the condition |v|<=EpsX is fulfilled, where: * |.| means Euclidian norm * v - scaled step vector, v[i]=dx[i]/s[i] * dx - ste pvector, dx=X(k+1)-X(k) * s - scaling coefficients set by LSFitSetScale() MaxIts - maximum number of iterations. If MaxIts=0, the number of iterations is unlimited. Only Levenberg-Marquardt iterations are counted (L-BFGS/CG iterations are NOT counted because their cost is very low compared to that of LM). NOTE Passing EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic stopping criterion selection (according to the scheme used by MINLM unit). -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitsetcond(lsfitstate state, double epsx, int maxits) { lsfit.lsfitsetcond(state.innerobj, epsx, maxits, null); } public static void lsfitsetcond(lsfitstate state, double epsx, int maxits, alglib.xparams _params) { lsfit.lsfitsetcond(state.innerobj, epsx, maxits, _params); } /************************************************************************* This function sets maximum step length INPUT PARAMETERS: State - structure which stores algorithm state StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't want to limit step length. Use this subroutine when you optimize target function which contains exp() or other fast growing functions, and optimization algorithm makes too large steps which leads to overflow. This function allows us to reject steps that are too large (and therefore expose us to the possible overflow) without actually calculating function value at the x+stp*d. 
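EXAMPLE (informal sketch; assumes that State was previously created by one of the LSFitCreate* functions above):

    alglib.lsfitsetcond(state, 1.0E-6, 0);   // stop when the scaled step drops below 1.0E-6, no iteration limit
    alglib.lsfitsetstpmax(state, 100.0);     // never make steps longer than 100 in the parameter space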
NOTE: non-zero StpMax leads to moderate performance degradation because intermediate step of preconditioned L-BFGS optimization is incompatible with limits on step size. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ public static void lsfitsetstpmax(lsfitstate state, double stpmax) { lsfit.lsfitsetstpmax(state.innerobj, stpmax, null); } public static void lsfitsetstpmax(lsfitstate state, double stpmax, alglib.xparams _params) { lsfit.lsfitsetstpmax(state.innerobj, stpmax, _params); } /************************************************************************* This function turns on/off reporting. INPUT PARAMETERS: State - structure which stores algorithm state NeedXRep- whether iteration reports are needed or not When reports are needed, State.C (current parameters) and State.F (current value of fitting function) are reported. -- ALGLIB -- Copyright 15.08.2010 by Bochkanov Sergey *************************************************************************/ public static void lsfitsetxrep(lsfitstate state, bool needxrep) { lsfit.lsfitsetxrep(state.innerobj, needxrep, null); } public static void lsfitsetxrep(lsfitstate state, bool needxrep, alglib.xparams _params) { lsfit.lsfitsetxrep(state.innerobj, needxrep, _params); } /************************************************************************* This function sets scaling coefficients for underlying optimizer. ALGLIB optimizers use scaling matrices to test stopping conditions (step size and gradient are scaled before comparison with tolerances). Scale of the I-th variable is a translation invariant measure of: a) "how large" the variable is b) how large the step should be to make significant changes in the function Generally, scale is NOT considered to be a form of preconditioner. But LM optimizer is unique in that it uses scaling matrix both in the stopping condition tests and as Marquardt damping factor. Proper scaling is very important for the algorithm performance. It is less important for the quality of results, but still has some influence (it is easier to converge when variables are properly scaled, so premature stopping is possible when very badly scalled variables are combined with relaxed stopping conditions). INPUT PARAMETERS: State - structure stores algorithm state S - array[N], non-zero scaling coefficients S[i] may be negative, sign doesn't matter. -- ALGLIB -- Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ public static void lsfitsetscale(lsfitstate state, double[] s) { lsfit.lsfitsetscale(state.innerobj, s, null); } public static void lsfitsetscale(lsfitstate state, double[] s, alglib.xparams _params) { lsfit.lsfitsetscale(state.innerobj, s, _params); } /************************************************************************* This function sets boundary constraints for underlying optimizer Boundary constraints are inactive by default (after initial creation). They are preserved until explicitly turned off with another SetBC() call. INPUT PARAMETERS: State - structure stores algorithm state BndL - lower bounds, array[K]. If some (all) variables are unbounded, you may specify very small number or -INF (latter is recommended because it will allow solver to use better algorithm). BndU - upper bounds, array[K]. If some (all) variables are unbounded, you may specify very large number or +INF (latter is recommended because it will allow solver to use better algorithm). 
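EXAMPLE (informal sketch; assumes a 2-parameter fit and a previously created State): keep C[0] non-negative and leave C[1] unbounded, using infinities as recommended above:

    double[] bndl = new double[] { 0.0, System.Double.NegativeInfinity };
    double[] bndu = new double[] { System.Double.PositiveInfinity, System.Double.PositiveInfinity };
    alglib.lsfitsetbc(state, bndl, bndu);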
NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th variable will be "frozen" at X[i]=BndL[i]=BndU[i]. NOTE 2: unlike other constrained optimization algorithms, this solver has following useful properties: * bound constraints are always satisfied exactly * function is evaluated only INSIDE area specified by bound constraints -- ALGLIB -- Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ public static void lsfitsetbc(lsfitstate state, double[] bndl, double[] bndu) { lsfit.lsfitsetbc(state.innerobj, bndl, bndu, null); } public static void lsfitsetbc(lsfitstate state, double[] bndl, double[] bndu, alglib.xparams _params) { lsfit.lsfitsetbc(state.innerobj, bndl, bndu, _params); } /************************************************************************* This function sets linear constraints for underlying optimizer Linear constraints are inactive by default (after initial creation). They are preserved until explicitly turned off with another SetLC() call. INPUT PARAMETERS: State - structure stores algorithm state C - linear constraints, array[K,N+1]. Each row of C represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of C (including right part) must be finite. CT - type of constraints, array[K]: * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] K - number of equality/inequality constraints, K>=0: * if given, only leading K elements of C/CT are used * if not given, automatically determined from sizes of C/CT IMPORTANT: if you have linear constraints, it is strongly recommended to set scale of variables with lsfitsetscale(). QP solver which is used to calculate linearly constrained steps heavily relies on good scaling of input problems. NOTE: linear (non-box) constraints are satisfied only approximately - there always exists some violation due to numerical errors and algorithmic limitations. NOTE: general linear constraints add significant overhead to solution process. Although solver performs roughly same amount of iterations (when compared with similar box-only constrained problem), each iteration now involves solution of linearly constrained QP subproblem, which requires ~3-5 times more Cholesky decompositions. Thus, if you can reformulate your problem in such way this it has only box constraints, it may be beneficial to do so. 
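EXAMPLE (informal sketch; assumes a 2-parameter fit and a previously created State): impose c0+c1>=1 together with c0-c1=0, and set the variable scaling recommended above:

    double[,] lc = new double[,] { { 1.0,  1.0, 1.0 },     // 1*c0 + 1*c1 >= 1
                                   { 1.0, -1.0, 0.0 } };   // 1*c0 - 1*c1  = 0
    int[]     ct = new int[]     { 1, 0 };
    alglib.lsfitsetlc(state, lc, ct);
    alglib.lsfitsetscale(state, new double[] { 1.0, 1.0 });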
-- ALGLIB -- Copyright 29.04.2017 by Bochkanov Sergey *************************************************************************/ public static void lsfitsetlc(lsfitstate state, double[,] c, int[] ct, int k) { lsfit.lsfitsetlc(state.innerobj, c, ct, k, null); } public static void lsfitsetlc(lsfitstate state, double[,] c, int[] ct, int k, alglib.xparams _params) { lsfit.lsfitsetlc(state.innerobj, c, ct, k, _params); } public static void lsfitsetlc(lsfitstate state, double[,] c, int[] ct) { int k; if( (ap.rows(c)!=ap.len(ct))) throw new alglibexception("Error while calling 'lsfitsetlc': looks like one of arguments has wrong size"); k = ap.rows(c); lsfit.lsfitsetlc(state.innerobj, c, ct, k, null); return; } public static void lsfitsetlc(lsfitstate state, double[,] c, int[] ct, alglib.xparams _params) { int k; if( (ap.rows(c)!=ap.len(ct))) throw new alglibexception("Error while calling 'lsfitsetlc': looks like one of arguments has wrong size"); k = ap.rows(c); lsfit.lsfitsetlc(state.innerobj, c, ct, k, _params); return; } /************************************************************************* This function provides reverse communication interface Reverse communication interface is not documented or recommended to use. See below for functions which provide better documented API *************************************************************************/ public static bool lsfititeration(lsfitstate state) { return lsfit.lsfititeration(state.innerobj, null); } public static bool lsfititeration(lsfitstate state, alglib.xparams _params) { return lsfit.lsfititeration(state.innerobj, _params); } /************************************************************************* This family of functions is used to launcn iterations of nonlinear fitter These functions accept following parameters: func - callback which calculates function (or merit function) value func at given point x grad - callback which calculates function (or merit function) value func and gradient grad at given point x hess - callback which calculates function (or merit function) value func, gradient grad and Hessian hess at given point x rep - optional callback which is called after each iteration can be null obj - optional object which is passed to func/grad/hess/jac/rep can be null NOTES: 1. this algorithm is somewhat unusual because it works with parameterized function f(C,X), where X is a function argument (we have many points which are characterized by different argument values), and C is a parameter to fit. For example, if we want to do linear fit by f(c0,c1,x) = c0*x+c1, then x will be argument, and {c0,c1} will be parameters. It is important to understand that this algorithm finds minimum in the space of function PARAMETERS (not arguments), so it needs derivatives of f() with respect to C, not X. In the example above it will need f=c0*x+c1 and {df/dc0,df/dc1} = {x,1} instead of {df/dx} = {c0}. 2. Callback functions accept C as the first parameter, and X as the second 3. If state was created with LSFitCreateFG(), algorithm needs just function and its gradient, but if state was created with LSFitCreateFGH(), algorithm will need function, gradient and Hessian. According to the said above, there ase several versions of this function, which accept different sets of callbacks. This flexibility opens way to subtle errors - you may create state with LSFitCreateFGH() (optimization using Hessian), but call function which does not accept Hessian. So when algorithm will request Hessian, there will be no callback to call. 
In this case exception will be thrown. Be careful to avoid such errors because there is no way to find them at compile time - you can see them at runtime only. -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitfit(lsfitstate state, ndimensional_pfunc func, ndimensional_rep rep, object obj) { lsfitfit(state, func, rep, obj, null); } public static void lsfitfit(lsfitstate state, ndimensional_pfunc func, ndimensional_rep rep, object obj, alglib.xparams _params) { if( func==null ) throw new alglibexception("ALGLIB: error in 'lsfitfit()' (func is null)"); while( alglib.lsfititeration(state, _params) ) { if( state.needf ) { func(state.c, state.x, ref state.innerobj.f, obj); continue; } if( state.innerobj.xupdated ) { if( rep!=null ) rep(state.innerobj.c, state.innerobj.f, obj); continue; } throw new alglibexception("ALGLIB: error in 'lsfitfit' (some derivatives were not provided?)"); } } public static void lsfitfit(lsfitstate state, ndimensional_pfunc func, ndimensional_pgrad grad, ndimensional_rep rep, object obj) { lsfitfit(state, func, grad, rep, obj, null); } public static void lsfitfit(lsfitstate state, ndimensional_pfunc func, ndimensional_pgrad grad, ndimensional_rep rep, object obj, alglib.xparams _params) { if( func==null ) throw new alglibexception("ALGLIB: error in 'lsfitfit()' (func is null)"); if( grad==null ) throw new alglibexception("ALGLIB: error in 'lsfitfit()' (grad is null)"); while( alglib.lsfititeration(state, _params) ) { if( state.needf ) { func(state.c, state.x, ref state.innerobj.f, obj); continue; } if( state.needfg ) { grad(state.c, state.x, ref state.innerobj.f, state.innerobj.g, obj); continue; } if( state.innerobj.xupdated ) { if( rep!=null ) rep(state.innerobj.c, state.innerobj.f, obj); continue; } throw new alglibexception("ALGLIB: error in 'lsfitfit' (some derivatives were not provided?)"); } } public static void lsfitfit(lsfitstate state, ndimensional_pfunc func, ndimensional_pgrad grad, ndimensional_phess hess, ndimensional_rep rep, object obj) { lsfitfit(state, func, grad, hess, rep, obj, null); } public static void lsfitfit(lsfitstate state, ndimensional_pfunc func, ndimensional_pgrad grad, ndimensional_phess hess, ndimensional_rep rep, object obj, alglib.xparams _params) { if( func==null ) throw new alglibexception("ALGLIB: error in 'lsfitfit()' (func is null)"); if( grad==null ) throw new alglibexception("ALGLIB: error in 'lsfitfit()' (grad is null)"); if( hess==null ) throw new alglibexception("ALGLIB: error in 'lsfitfit()' (hess is null)"); while( alglib.lsfititeration(state, _params) ) { if( state.needf ) { func(state.c, state.x, ref state.innerobj.f, obj); continue; } if( state.needfg ) { grad(state.c, state.x, ref state.innerobj.f, state.innerobj.g, obj); continue; } if( state.needfgh ) { hess(state.c, state.x, ref state.innerobj.f, state.innerobj.g, state.innerobj.h, obj); continue; } if( state.innerobj.xupdated ) { if( rep!=null ) rep(state.innerobj.c, state.innerobj.f, obj); continue; } throw new alglibexception("ALGLIB: error in 'lsfitfit' (some derivatives were not provided?)"); } } /************************************************************************* Nonlinear least squares fitting results. Called after return from LSFitFit(). INPUT PARAMETERS: State - algorithm state OUTPUT PARAMETERS: Info - completion code: * -8 optimizer detected NAN/INF in the target function and/or gradient * -7 gradient verification failed. 
See LSFitSetGradientCheck() for more information. * -3 inconsistent constraints * 2 relative step is no more than EpsX. * 5 MaxIts steps were taken * 7 stopping conditions are too stringent, further improvement is impossible C - array[0..K-1], solution Rep - optimization report. On success following fields are set: * R2 non-adjusted coefficient of determination (non-weighted) * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED * WRMSError weighted rms error on the (X,Y). ERRORS IN PARAMETERS This solver also calculates different kinds of errors in parameters and fills corresponding fields of report: * Rep.CovPar covariance matrix for parameters, array[K,K]. * Rep.ErrPar errors in parameters, array[K], errpar = sqrt(diag(CovPar)) * Rep.ErrCurve vector of fit errors - standard deviations of empirical best-fit curve from "ideal" best-fit curve built with infinite number of samples, array[N]. errcurve = sqrt(diag(J*CovPar*J')), where J is Jacobian matrix. * Rep.Noise vector of per-point estimates of noise, array[N] IMPORTANT: errors in parameters are calculated without taking into account boundary/linear constraints! Presence of constraints changes distribution of errors, but there is no easy way to account for constraints when you calculate covariance matrix. NOTE: noise in the data is estimated as follows: * for fitting without user-supplied weights all points are assumed to have same level of noise, which is estimated from the data * for fitting with user-supplied weights we assume that noise level in I-th point is inversely proportional to I-th weight. Coefficient of proportionality is estimated from the data. NOTE: we apply small amount of regularization when we invert squared Jacobian and calculate covariance matrix. It guarantees that algorithm won't divide by zero during inversion, but skews error estimates a bit (fractional error is about 10^-9). However, we believe that this difference is insignificant for all practical purposes except for the situation when you want to compare ALGLIB results with "reference" implementation up to the last significant digit. NOTE: covariance matrix is estimated using correction for degrees of freedom (covariances are divided by N-M instead of dividing by N). 
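EXAMPLE (informal end-to-end sketch; the data and callback name are illustrative): fit f(c,x)=c0*exp(-c1*x) using function values only, then read the results:

    static void expfunc(double[] c, double[] x, ref double func, object obj)
    {
        func = c[0]*System.Math.Exp(-c[1]*x[0]);
    }
    // ... and, in the fitting routine:
    double[,] x = new double[,] { {0.0}, {1.0}, {2.0}, {3.0} };
    double[]  y = new double[]  { 2.0, 1.2, 0.8, 0.5 };
    double[]  c = new double[]  { 1.0, 1.0 };          // initial guess
    int info;
    alglib.lsfitstate  state;
    alglib.lsfitreport rep;
    alglib.lsfitcreatef(x, y, c, 1.0E-4, out state);   // numerical differentiation with step 1.0E-4
    alglib.lsfitsetcond(state, 1.0E-6, 0);
    alglib.lsfitfit(state, expfunc, null, null);
    alglib.lsfitresults(state, out info, out c, out rep);
    // info>0 on success; c holds the fitted parameters,
    // rep.rmserror and rep.r2 describe quality of the fit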
-- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitresults(lsfitstate state, out int info, out double[] c, out lsfitreport rep) { info = 0; c = new double[0]; rep = new lsfitreport(); lsfit.lsfitresults(state.innerobj, ref info, ref c, rep.innerobj, null); } public static void lsfitresults(lsfitstate state, out int info, out double[] c, out lsfitreport rep, alglib.xparams _params) { info = 0; c = new double[0]; rep = new lsfitreport(); lsfit.lsfitresults(state.innerobj, ref info, ref c, rep.innerobj, _params); } /************************************************************************* This subroutine turns on verification of the user-supplied analytic gradient: * user calls this subroutine before fitting begins * LSFitFit() is called * prior to actual fitting, for each point in data set X_i and each component of parameters being fited C_j algorithm performs following steps: * two trial steps are made to C_j-TestStep*S[j] and C_j+TestStep*S[j], where C_j is j-th parameter and S[j] is a scale of j-th parameter * if needed, steps are bounded with respect to constraints on C[] * F(X_i|C) is evaluated at these trial points * we perform one more evaluation in the middle point of the interval * we build cubic model using function values and derivatives at trial points and we compare its prediction with actual value in the middle point * in case difference between prediction and actual value is higher than some predetermined threshold, algorithm stops with completion code -7; Rep.VarIdx is set to index of the parameter with incorrect derivative. * after verification is over, algorithm proceeds to the actual optimization. NOTE 1: verification needs N*K (points count * parameters count) gradient evaluations. It is very costly and you should use it only for low dimensional problems, when you want to be sure that you've correctly calculated analytic derivatives. You should not use it in the production code (unless you want to check derivatives provided by some third party). NOTE 2: you should carefully choose TestStep. Value which is too large (so large that function behaviour is significantly non-cubic) will lead to false alarms. You may use different step for different parameters by means of setting scale with LSFitSetScale(). NOTE 3: this function may lead to false positives. In case it reports that I-th derivative was calculated incorrectly, you may decrease test step and try one more time - maybe your function changes too sharply and your step is too large for such rapidly chanding function. NOTE 4: this function works only for optimizers created with LSFitCreateWFG() or LSFitCreateFG() constructors. 
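EXAMPLE (informal sketch; reuses the illustrative expfunc/expgrad callbacks and the State sketched earlier, which must have been created with LSFitCreateWFG()/LSFitCreateFG()):

    alglib.lsfitsetgradientcheck(state, 1.0E-3);          // verify analytic gradient with test step 1.0E-3
    alglib.lsfitfit(state, expfunc, expgrad, null, null); // completion code -7 and Rep.VarIdx flag a suspicious derivative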
INPUT PARAMETERS: State - structure used to store algorithm state TestStep - verification step: * TestStep=0 turns verification off * TestStep>0 activates verification -- ALGLIB -- Copyright 15.06.2012 by Bochkanov Sergey *************************************************************************/ public static void lsfitsetgradientcheck(lsfitstate state, double teststep) { lsfit.lsfitsetgradientcheck(state.innerobj, teststep, null); } public static void lsfitsetgradientcheck(lsfitstate state, double teststep, alglib.xparams _params) { lsfit.lsfitsetgradientcheck(state.innerobj, teststep, _params); } } public partial class alglib { } public partial class alglib { /************************************************************************* 2-dimensional spline inteprolant *************************************************************************/ public class spline2dinterpolant : alglibobject { // // Public declarations // public spline2dinterpolant() { _innerobj = new spline2d.spline2dinterpolant(); } public override alglib.alglibobject make_copy() { return new spline2dinterpolant((spline2d.spline2dinterpolant)_innerobj.make_copy()); } // // Although some of declarations below are public, you should not use them // They are intended for internal use only // private spline2d.spline2dinterpolant _innerobj; public spline2d.spline2dinterpolant innerobj { get { return _innerobj; } } public spline2dinterpolant(spline2d.spline2dinterpolant obj) { _innerobj = obj; } } /************************************************************************* Nonlinear least squares solver used to fit 2D splines to data *************************************************************************/ public class spline2dbuilder : alglibobject { // // Public declarations // public spline2dbuilder() { _innerobj = new spline2d.spline2dbuilder(); } public override alglib.alglibobject make_copy() { return new spline2dbuilder((spline2d.spline2dbuilder)_innerobj.make_copy()); } // // Although some of declarations below are public, you should not use them // They are intended for internal use only // private spline2d.spline2dbuilder _innerobj; public spline2d.spline2dbuilder innerobj { get { return _innerobj; } } public spline2dbuilder(spline2d.spline2dbuilder obj) { _innerobj = obj; } } /************************************************************************* Spline 2D fitting report: rmserror RMS error avgerror average error maxerror maximum error r2 coefficient of determination, R-squared, 1-RSS/TSS *************************************************************************/ public class spline2dfitreport : alglibobject { // // Public declarations // public double rmserror { get { return _innerobj.rmserror; } set { _innerobj.rmserror = value; } } public double avgerror { get { return _innerobj.avgerror; } set { _innerobj.avgerror = value; } } public double maxerror { get { return _innerobj.maxerror; } set { _innerobj.maxerror = value; } } public double r2 { get { return _innerobj.r2; } set { _innerobj.r2 = value; } } public spline2dfitreport() { _innerobj = new spline2d.spline2dfitreport(); } public override alglib.alglibobject make_copy() { return new spline2dfitreport((spline2d.spline2dfitreport)_innerobj.make_copy()); } // // Although some of declarations below are public, you should not use them // They are intended for internal use only // private spline2d.spline2dfitreport _innerobj; public spline2d.spline2dfitreport innerobj { get { return _innerobj; } } public spline2dfitreport(spline2d.spline2dfitreport obj) { _innerobj = 
obj; } } /************************************************************************* This function serializes data structure to string. Important properties of s_out: * it contains alphanumeric characters, dots, underscores, minus signs * these symbols are grouped into words, which are separated by spaces and Windows-style (CR+LF) newlines * although serializer uses spaces and CR+LF as separators, you can replace any separator character by arbitrary combination of spaces, tabs, Windows or Unix newlines. It allows flexible reformatting of the string in case you want to include it into text or XML file. But you should not insert separators into the middle of the "words" nor you should change case of letters. * s_out can be freely moved between 32-bit and 64-bit systems, little and big endian machines, and so on. You can serialize structure on 32-bit machine and unserialize it on 64-bit one (or vice versa), or serialize it on SPARC and unserialize on x86. You can also serialize it in C# version of ALGLIB and unserialize in C++ one, and vice versa. *************************************************************************/ public static void spline2dserialize(spline2dinterpolant obj, out string s_out) { alglib.serializer s = new alglib.serializer(); s.alloc_start(); spline2d.spline2dalloc(s, obj.innerobj, null); s.sstart_str(); spline2d.spline2dserialize(s, obj.innerobj, null); s.stop(); s_out = s.get_string(); } /************************************************************************* This function unserializes data structure from string. *************************************************************************/ public static void spline2dunserialize(string s_in, out spline2dinterpolant obj) { alglib.serializer s = new alglib.serializer(); obj = new spline2dinterpolant(); s.ustart_str(s_in); spline2d.spline2dunserialize(s, obj.innerobj, null); s.stop(); } /************************************************************************* This function serializes data structure to stream. Data stream generated by this function is same as string representation generated by string version of serializer - alphanumeric characters, dots, underscores, minus signs, which are grouped into words separated by spaces and CR+LF. We recommend you to read comments on string version of serializer to find out more about serialization of AlGLIB objects. *************************************************************************/ public static void spline2dserialize(spline2dinterpolant obj, System.IO.Stream stream_out) { alglib.serializer s = new alglib.serializer(); s.alloc_start(); spline2d.spline2dalloc(s, obj.innerobj, null); s.sstart_stream(stream_out); spline2d.spline2dserialize(s, obj.innerobj, null); s.stop(); } /************************************************************************* This function unserializes data structure from stream. *************************************************************************/ public static void spline2dunserialize(System.IO.Stream stream_in, out spline2dinterpolant obj) { alglib.serializer s = new alglib.serializer(); obj = new spline2dinterpolant(); s.ustart_stream(stream_in); spline2d.spline2dunserialize(s, obj.innerobj, null); s.stop(); } /************************************************************************* This subroutine calculates the value of the bilinear or bicubic spline at the given point X. Input parameters: C - 2D spline object. Built by spline2dbuildbilinearv or spline2dbuildbicubicv. 
X, Y- point Result: S(x,y) -- ALGLIB PROJECT -- Copyright 05.07.2007 by Bochkanov Sergey *************************************************************************/ public static double spline2dcalc(spline2dinterpolant c, double x, double y) { return spline2d.spline2dcalc(c.innerobj, x, y, null); } public static double spline2dcalc(spline2dinterpolant c, double x, double y, alglib.xparams _params) { return spline2d.spline2dcalc(c.innerobj, x, y, _params); } /************************************************************************* This subroutine calculates the value of the bilinear or bicubic spline at the given point X and its derivatives. Input parameters: C - spline interpolant. X, Y- point Output parameters: F - S(x,y) FX - dS(x,y)/dX FY - dS(x,y)/dY FXY - d2S(x,y)/dXdY -- ALGLIB PROJECT -- Copyright 05.07.2007 by Bochkanov Sergey *************************************************************************/ public static void spline2ddiff(spline2dinterpolant c, double x, double y, out double f, out double fx, out double fy, out double fxy) { f = 0; fx = 0; fy = 0; fxy = 0; spline2d.spline2ddiff(c.innerobj, x, y, ref f, ref fx, ref fy, ref fxy, null); } public static void spline2ddiff(spline2dinterpolant c, double x, double y, out double f, out double fx, out double fy, out double fxy, alglib.xparams _params) { f = 0; fx = 0; fy = 0; fxy = 0; spline2d.spline2ddiff(c.innerobj, x, y, ref f, ref fx, ref fy, ref fxy, _params); } /************************************************************************* This subroutine calculates bilinear or bicubic vector-valued spline at the given point (X,Y). If you need just some specific component of vector-valued spline, you can use spline2dcalcvi() function. INPUT PARAMETERS: C - spline interpolant. X, Y- point F - output buffer, possibly preallocated array. In case array size is large enough to store result, it is not reallocated. Array which is too short will be reallocated OUTPUT PARAMETERS: F - array[D] (or larger) which stores function values -- ALGLIB PROJECT -- Copyright 01.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dcalcvbuf(spline2dinterpolant c, double x, double y, ref double[] f) { spline2d.spline2dcalcvbuf(c.innerobj, x, y, ref f, null); } public static void spline2dcalcvbuf(spline2dinterpolant c, double x, double y, ref double[] f, alglib.xparams _params) { spline2d.spline2dcalcvbuf(c.innerobj, x, y, ref f, _params); } /************************************************************************* This subroutine calculates specific component of vector-valued bilinear or bicubic spline at the given point (X,Y). INPUT PARAMETERS: C - spline interpolant. X, Y- point I - component index, in [0,D). An exception is generated for out of range values. RESULT: value of I-th component -- ALGLIB PROJECT -- Copyright 01.02.2018 by Bochkanov Sergey *************************************************************************/ public static double spline2dcalcvi(spline2dinterpolant c, double x, double y, int i) { return spline2d.spline2dcalcvi(c.innerobj, x, y, i, null); } public static double spline2dcalcvi(spline2dinterpolant c, double x, double y, int i, alglib.xparams _params) { return spline2d.spline2dcalcvi(c.innerobj, x, y, i, _params); } /************************************************************************* This subroutine calculates bilinear or bicubic vector-valued spline at the given point (X,Y). INPUT PARAMETERS: C - spline interpolant. 
X, Y- point OUTPUT PARAMETERS: F - array[D] which stores function values. F is out-parameter and it is reallocated after call to this function. In case you want to reuse previously allocated F, you may use Spline2DCalcVBuf(), which reallocates F only when it is too small. -- ALGLIB PROJECT -- Copyright 16.04.2012 by Bochkanov Sergey *************************************************************************/ public static void spline2dcalcv(spline2dinterpolant c, double x, double y, out double[] f) { f = new double[0]; spline2d.spline2dcalcv(c.innerobj, x, y, ref f, null); } public static void spline2dcalcv(spline2dinterpolant c, double x, double y, out double[] f, alglib.xparams _params) { f = new double[0]; spline2d.spline2dcalcv(c.innerobj, x, y, ref f, _params); } /************************************************************************* This subroutine calculates value of specific component of bilinear or bicubic vector-valued spline and its derivatives. Input parameters: C - spline interpolant. X, Y- point I - component index, in [0,D) Output parameters: F - S(x,y) FX - dS(x,y)/dX FY - dS(x,y)/dY FXY - d2S(x,y)/dXdY -- ALGLIB PROJECT -- Copyright 05.07.2007 by Bochkanov Sergey *************************************************************************/ public static void spline2ddiffvi(spline2dinterpolant c, double x, double y, int i, out double f, out double fx, out double fy, out double fxy) { f = 0; fx = 0; fy = 0; fxy = 0; spline2d.spline2ddiffvi(c.innerobj, x, y, i, ref f, ref fx, ref fy, ref fxy, null); } public static void spline2ddiffvi(spline2dinterpolant c, double x, double y, int i, out double f, out double fx, out double fy, out double fxy, alglib.xparams _params) { f = 0; fx = 0; fy = 0; fxy = 0; spline2d.spline2ddiffvi(c.innerobj, x, y, i, ref f, ref fx, ref fy, ref fxy, _params); } /************************************************************************* This subroutine performs linear transformation of the spline argument. Input parameters: C - spline interpolant AX, BX - transformation coefficients: x = A*t + B AY, BY - transformation coefficients: y = A*u + B Result: C - transformed spline -- ALGLIB PROJECT -- Copyright 30.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline2dlintransxy(spline2dinterpolant c, double ax, double bx, double ay, double by) { spline2d.spline2dlintransxy(c.innerobj, ax, bx, ay, by, null); } public static void spline2dlintransxy(spline2dinterpolant c, double ax, double bx, double ay, double by, alglib.xparams _params) { spline2d.spline2dlintransxy(c.innerobj, ax, bx, ay, by, _params); } /************************************************************************* This subroutine performs linear transformation of the spline. Input parameters: C - spline interpolant. A, B- transformation coefficients: S2(x,y) = A*S(x,y) + B Output parameters: C - transformed spline -- ALGLIB PROJECT -- Copyright 30.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline2dlintransf(spline2dinterpolant c, double a, double b) { spline2d.spline2dlintransf(c.innerobj, a, b, null); } public static void spline2dlintransf(spline2dinterpolant c, double a, double b, alglib.xparams _params) { spline2d.spline2dlintransf(c.innerobj, a, b, _params); } /************************************************************************* This subroutine makes the copy of the spline model. 
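EXAMPLE (informal sketch; the 3x3 grid below is illustrative): build a scalar bilinear spline on a regular grid and evaluate its value and derivatives at (0.25,0.5):

    double[] gx = new double[] { 0.0, 0.5, 1.0 };   // abscissas, N=3
    double[] gy = new double[] { 0.0, 0.5, 1.0 };   // ordinates, M=3
    double[] f  = new double[9];                    // D=1 value per node, F[j*N+i] = f(gx[i],gy[j])
    for(int j=0; j<3; j++)
        for(int i=0; i<3; i++)
            f[j*3+i] = gx[i]*gy[j];
    alglib.spline2dinterpolant s;
    alglib.spline2dbuildbilinearv(gx, 3, gy, 3, f, 1, out s);
    double v = alglib.spline2dcalc(s, 0.25, 0.5);   // = 0.125, since x*y is bilinear on each cell
    double sf, sfx, sfy, sfxy;
    alglib.spline2ddiff(s, 0.25, 0.5, out sf, out sfx, out sfy, out sfxy);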
Input parameters: C - spline interpolant Output parameters: CC - spline copy -- ALGLIB PROJECT -- Copyright 29.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline2dcopy(spline2dinterpolant c, out spline2dinterpolant cc) { cc = new spline2dinterpolant(); spline2d.spline2dcopy(c.innerobj, cc.innerobj, null); } public static void spline2dcopy(spline2dinterpolant c, out spline2dinterpolant cc, alglib.xparams _params) { cc = new spline2dinterpolant(); spline2d.spline2dcopy(c.innerobj, cc.innerobj, _params); } /************************************************************************* Bicubic spline resampling Input parameters: A - function values at the old grid, array[0..OldHeight-1, 0..OldWidth-1] OldHeight - old grid height, OldHeight>1 OldWidth - old grid width, OldWidth>1 NewHeight - new grid height, NewHeight>1 NewWidth - new grid width, NewWidth>1 Output parameters: B - function values at the new grid, array[0..NewHeight-1, 0..NewWidth-1] -- ALGLIB routine -- 15 May, 2007 Copyright by Bochkanov Sergey *************************************************************************/ public static void spline2dresamplebicubic(double[,] a, int oldheight, int oldwidth, out double[,] b, int newheight, int newwidth) { b = new double[0,0]; spline2d.spline2dresamplebicubic(a, oldheight, oldwidth, ref b, newheight, newwidth, null); } public static void spline2dresamplebicubic(double[,] a, int oldheight, int oldwidth, out double[,] b, int newheight, int newwidth, alglib.xparams _params) { b = new double[0,0]; spline2d.spline2dresamplebicubic(a, oldheight, oldwidth, ref b, newheight, newwidth, _params); } /************************************************************************* Bilinear spline resampling Input parameters: A - function values at the old grid, array[0..OldHeight-1, 0..OldWidth-1] OldHeight - old grid height, OldHeight>1 OldWidth - old grid width, OldWidth>1 NewHeight - new grid height, NewHeight>1 NewWidth - new grid width, NewWidth>1 Output parameters: B - function values at the new grid, array[0..NewHeight-1, 0..NewWidth-1] -- ALGLIB routine -- 09.07.2007 Copyright by Bochkanov Sergey *************************************************************************/ public static void spline2dresamplebilinear(double[,] a, int oldheight, int oldwidth, out double[,] b, int newheight, int newwidth) { b = new double[0,0]; spline2d.spline2dresamplebilinear(a, oldheight, oldwidth, ref b, newheight, newwidth, null); } public static void spline2dresamplebilinear(double[,] a, int oldheight, int oldwidth, out double[,] b, int newheight, int newwidth, alglib.xparams _params) { b = new double[0,0]; spline2d.spline2dresamplebilinear(a, oldheight, oldwidth, ref b, newheight, newwidth, _params); } /************************************************************************* This subroutine builds bilinear vector-valued spline. Input parameters: X - spline abscissas, array[0..N-1] Y - spline ordinates, array[0..M-1] F - function values, array[0..M*N*D-1]: * first D elements store D values at (X[0],Y[0]) * next D elements store D values at (X[1],Y[0]) * general form - D function values at (X[i],Y[j]) are stored at F[D*(J*N+I)...D*(J*N+I)+D-1]. 
M,N - grid size, M>=2, N>=2 D - vector dimension, D>=1 Output parameters: C - spline interpolant -- ALGLIB PROJECT -- Copyright 16.04.2012 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildbilinearv(double[] x, int n, double[] y, int m, double[] f, int d, out spline2dinterpolant c) { c = new spline2dinterpolant(); spline2d.spline2dbuildbilinearv(x, n, y, m, f, d, c.innerobj, null); } public static void spline2dbuildbilinearv(double[] x, int n, double[] y, int m, double[] f, int d, out spline2dinterpolant c, alglib.xparams _params) { c = new spline2dinterpolant(); spline2d.spline2dbuildbilinearv(x, n, y, m, f, d, c.innerobj, _params); } /************************************************************************* This subroutine builds bicubic vector-valued spline. Input parameters: X - spline abscissas, array[0..N-1] Y - spline ordinates, array[0..M-1] F - function values, array[0..M*N*D-1]: * first D elements store D values at (X[0],Y[0]) * next D elements store D values at (X[1],Y[0]) * general form - D function values at (X[i],Y[j]) are stored at F[D*(J*N+I)...D*(J*N+I)+D-1]. M,N - grid size, M>=2, N>=2 D - vector dimension, D>=1 Output parameters: C - spline interpolant -- ALGLIB PROJECT -- Copyright 16.04.2012 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildbicubicv(double[] x, int n, double[] y, int m, double[] f, int d, out spline2dinterpolant c) { c = new spline2dinterpolant(); spline2d.spline2dbuildbicubicv(x, n, y, m, f, d, c.innerobj, null); } public static void spline2dbuildbicubicv(double[] x, int n, double[] y, int m, double[] f, int d, out spline2dinterpolant c, alglib.xparams _params) { c = new spline2dinterpolant(); spline2d.spline2dbuildbicubicv(x, n, y, m, f, d, c.innerobj, _params); } /************************************************************************* This subroutine unpacks two-dimensional spline into the coefficients table Input parameters: C - spline interpolant. Result: M, N- grid size (x-axis and y-axis) D - number of components Tbl - coefficients table, unpacked format, D - components: [0..(N-1)*(M-1)*D-1, 0..19]. For T=0..D-1 (component index), I = 0...N-2 (x index), J=0..M-2 (y index): K := T + I*D + J*D*(N-1) K-th row stores decomposition for T-th component of the vector-valued function Tbl[K,0] = X[i] Tbl[K,1] = X[i+1] Tbl[K,2] = Y[j] Tbl[K,3] = Y[j+1] Tbl[K,4] = C00 Tbl[K,5] = C01 Tbl[K,6] = C02 Tbl[K,7] = C03 Tbl[K,8] = C10 Tbl[K,9] = C11 ... 
Tbl[K,19] = C33 On each grid square the spline is equal to: S(x,y) = SUM(c[i,j]*(t^i)*(u^j), i=0..3, j=0..3), where t = x-x[i], u = y-y[j] -- ALGLIB PROJECT -- Copyright 16.04.2012 by Bochkanov Sergey *************************************************************************/ public static void spline2dunpackv(spline2dinterpolant c, out int m, out int n, out int d, out double[,] tbl) { m = 0; n = 0; d = 0; tbl = new double[0,0]; spline2d.spline2dunpackv(c.innerobj, ref m, ref n, ref d, ref tbl, null); } public static void spline2dunpackv(spline2dinterpolant c, out int m, out int n, out int d, out double[,] tbl, alglib.xparams _params) { m = 0; n = 0; d = 0; tbl = new double[0,0]; spline2d.spline2dunpackv(c.innerobj, ref m, ref n, ref d, ref tbl, _params); } /************************************************************************* This subroutine was deprecated in ALGLIB 3.6.0 We recommend you to switch to Spline2DBuildBilinearV(), which is more flexible and accepts its arguments in more convenient order. -- ALGLIB PROJECT -- Copyright 05.07.2007 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildbilinear(double[] x, double[] y, double[,] f, int m, int n, out spline2dinterpolant c) { c = new spline2dinterpolant(); spline2d.spline2dbuildbilinear(x, y, f, m, n, c.innerobj, null); } public static void spline2dbuildbilinear(double[] x, double[] y, double[,] f, int m, int n, out spline2dinterpolant c, alglib.xparams _params) { c = new spline2dinterpolant(); spline2d.spline2dbuildbilinear(x, y, f, m, n, c.innerobj, _params); } /************************************************************************* This subroutine was deprecated in ALGLIB 3.6.0 We recommend you to switch to Spline2DBuildBicubicV(), which is more flexible and accepts its arguments in more convenient order. -- ALGLIB PROJECT -- Copyright 05.07.2007 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildbicubic(double[] x, double[] y, double[,] f, int m, int n, out spline2dinterpolant c) { c = new spline2dinterpolant(); spline2d.spline2dbuildbicubic(x, y, f, m, n, c.innerobj, null); } public static void spline2dbuildbicubic(double[] x, double[] y, double[,] f, int m, int n, out spline2dinterpolant c, alglib.xparams _params) { c = new spline2dinterpolant(); spline2d.spline2dbuildbicubic(x, y, f, m, n, c.innerobj, _params); } /************************************************************************* This subroutine was deprecated in ALGLIB 3.6.0 We recommend you to switch to Spline2DUnpackV(), which is more flexible and accepts its arguments in more convenient order. -- ALGLIB PROJECT -- Copyright 29.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline2dunpack(spline2dinterpolant c, out int m, out int n, out double[,] tbl) { m = 0; n = 0; tbl = new double[0,0]; spline2d.spline2dunpack(c.innerobj, ref m, ref n, ref tbl, null); } public static void spline2dunpack(spline2dinterpolant c, out int m, out int n, out double[,] tbl, alglib.xparams _params) { m = 0; n = 0; tbl = new double[0,0]; spline2d.spline2dunpack(c.innerobj, ref m, ref n, ref tbl, _params); } /************************************************************************* This subroutine creates least squares solver used to fit 2D splines to irregularly sampled (scattered) data.
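For reference, a single component of the unpacked spline can be evaluated directly from the table returned by spline2dunpackv() above. The following fragment is an illustrative sketch only (variable names are arbitrary; it assumes that tbl, n and d were produced by spline2dunpackv() and that the query point (x,y) lies in cell (I,J) with component index T):

    int k = T + I*d + J*d*(n-1);              // row of the coefficient table for this cell/component
    double t = x - tbl[k,0];                  // offset from X[i], which is stored in column 0
    double u = y - tbl[k,2];                  // offset from Y[j], which is stored in column 2
    double v = 0;
    for(int i=0; i<=3; i++)
        for(int j=0; j<=3; j++)
            v += tbl[k,4+4*i+j]*Math.Pow(t,i)*Math.Pow(u,j);   // Cij is stored at column 4+4*i+j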
Solver object is used to perform spline fits as follows: * solver object is created with spline2dbuildercreate() function * dataset is added with spline2dbuildersetpoints() function * fit area is chosen: * spline2dbuildersetarea() - for user-defined area * spline2dbuildersetareaauto() - for automatically chosen area * number of grid nodes is chosen with spline2dbuildersetgrid() * prior term is chosen with one of the following functions: * spline2dbuildersetlinterm() to set linear prior * spline2dbuildersetconstterm() to set constant prior * spline2dbuildersetzeroterm() to set zero prior * spline2dbuildersetuserterm() to set user-defined constant prior * solver algorithm is chosen with either: * spline2dbuildersetalgoblocklls() - BlockLLS algorithm, medium-scale problems * spline2dbuildersetalgofastddm() - FastDDM algorithm, large-scale problems * finally, fitting itself is performed with spline2dfit() function. Most of the steps above can be omitted, solver is configured with good defaults. The minimum is to call: * spline2dbuildercreate() to create solver object * spline2dbuildersetpoints() to specify dataset * spline2dbuildersetgrid() to tell how many nodes you need * spline2dfit() to perform fit ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: D - positive number, number of Y-components: D=1 for simple scalar fit, D>1 for vector-valued spline fitting. OUTPUT PARAMETERS: S - solver object -- ALGLIB PROJECT -- Copyright 29.01.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildercreate(int d, out spline2dbuilder state) { state = new spline2dbuilder(); spline2d.spline2dbuildercreate(d, state.innerobj, null); } public static void spline2dbuildercreate(int d, out spline2dbuilder state, alglib.xparams _params) { state = new spline2dbuilder(); spline2d.spline2dbuildercreate(d, state.innerobj, _params); } /************************************************************************* This function sets constant prior term (model is a sum of bicubic spline and global prior, which can be linear, constant, user-defined constant or zero). Constant prior term is determined by least squares fitting. INPUT PARAMETERS: S - spline builder V - value for user-defined prior -- ALGLIB -- Copyright 01.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildersetuserterm(spline2dbuilder state, double v) { spline2d.spline2dbuildersetuserterm(state.innerobj, v, null); } public static void spline2dbuildersetuserterm(spline2dbuilder state, double v, alglib.xparams _params) { spline2d.spline2dbuildersetuserterm(state.innerobj, v, _params); } /************************************************************************* This function sets linear prior term (model is a sum of bicubic spline and global prior, which can be linear, constant, user-defined constant or zero). Linear prior term is determined by least squares fitting. 
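To make the minimal call sequence above concrete, here is an illustrative sketch (not a complete program; it assumes the spline2dbuildersetgrid() and spline2dcalc() wrappers declared elsewhere in this unit, and uses arbitrary sample data):

    // scattered samples of f(x,y)=x+y, one row per point: (x, y, f)
    double[,] xy = new double[,]{{0,0,0},{1,0,1},{0,1,1},{1,1,2},{0.5,0.5,1.0}};
    alglib.spline2dbuilder builder;
    alglib.spline2dbuildercreate(1, out builder);       // D=1, scalar fit
    alglib.spline2dbuildersetpoints(builder, xy, 5);    // attach dataset
    alglib.spline2dbuildersetgrid(builder, 8, 8);       // 8x8 grid of nodes
    alglib.spline2dinterpolant s;
    alglib.spline2dfitreport rep;
    alglib.spline2dfit(builder, out s, out rep);        // area, prior and solver keep their defaults
    double v = alglib.spline2dcalc(s, 0.25, 0.75);      // evaluate the fitted spline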
INPUT PARAMETERS: S - spline builder -- ALGLIB -- Copyright 01.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildersetlinterm(spline2dbuilder state) { spline2d.spline2dbuildersetlinterm(state.innerobj, null); } public static void spline2dbuildersetlinterm(spline2dbuilder state, alglib.xparams _params) { spline2d.spline2dbuildersetlinterm(state.innerobj, _params); } /************************************************************************* This function sets constant prior term (model is a sum of bicubic spline and global prior, which can be linear, constant, user-defined constant or zero). Constant prior term is determined by least squares fitting. INPUT PARAMETERS: S - spline builder -- ALGLIB -- Copyright 01.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildersetconstterm(spline2dbuilder state) { spline2d.spline2dbuildersetconstterm(state.innerobj, null); } public static void spline2dbuildersetconstterm(spline2dbuilder state, alglib.xparams _params) { spline2d.spline2dbuildersetconstterm(state.innerobj, _params); } /************************************************************************* This function sets zero prior term (model is a sum of bicubic spline and global prior, which can be linear, constant, user-defined constant or zero). INPUT PARAMETERS: S - spline builder -- ALGLIB -- Copyright 01.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildersetzeroterm(spline2dbuilder state) { spline2d.spline2dbuildersetzeroterm(state.innerobj, null); } public static void spline2dbuildersetzeroterm(spline2dbuilder state, alglib.xparams _params) { spline2d.spline2dbuildersetzeroterm(state.innerobj, _params); } /************************************************************************* This function adds dataset to the builder object. This function overrides results of the previous calls, i.e. multiple calls of this function will result in only the last set being added. INPUT PARAMETERS: S - spline 2D builder object XY - points, array[N,2+D]. One row corresponds to one point in the dataset. First 2 elements are coordinates, next D elements are function values. Array may be larger than specified, in this case only leading [N,2+D] elements will be used. N - number of points in the dataset -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildersetpoints(spline2dbuilder state, double[,] xy, int n) { spline2d.spline2dbuildersetpoints(state.innerobj, xy, n, null); } public static void spline2dbuildersetpoints(spline2dbuilder state, double[,] xy, int n, alglib.xparams _params) { spline2d.spline2dbuildersetpoints(state.innerobj, xy, n, _params); } /************************************************************************* This function sets area where 2D spline interpolant is built. "Auto" means that area extent is determined automatically from dataset extent.
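For a vector-valued fit the same row layout used by spline2dbuildersetpoints() simply carries D function columns per row. An illustrative sketch (it assumes a builder previously created with spline2dbuildercreate(2, out builder)):

    // D=2 dataset: each row is (x, y, f0, f1)
    double[,] xy = new double[,]{
        {0.0, 0.0,  1.0, -1.0},
        {1.0, 0.0,  2.0, -2.0},
        {0.0, 1.0,  3.0, -3.0}};
    alglib.spline2dbuildersetpoints(builder, xy, 3);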
INPUT PARAMETERS: S - spline 2D builder object -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildersetareaauto(spline2dbuilder state) { spline2d.spline2dbuildersetareaauto(state.innerobj, null); } public static void spline2dbuildersetareaauto(spline2dbuilder state, alglib.xparams _params) { spline2d.spline2dbuildersetareaauto(state.innerobj, _params); } /************************************************************************* This function sets area where 2D spline interpolant is built to user-defined one: [XA,XB]*[YA,YB] INPUT PARAMETERS: S - spline 2D builder object XA,XB - spatial extent in the first (X) dimension, XA<XB YA,YB - spatial extent in the second (Y) dimension, YA<YB -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildersetarea(spline2dbuilder state, double xa, double xb, double ya, double yb) { spline2d.spline2dbuildersetarea(state.innerobj, xa, xb, ya, yb, null); } public static void spline2dbuildersetarea(spline2dbuilder state, double xa, double xb, double ya, double yb, alglib.xparams _params) { spline2d.spline2dbuildersetarea(state.innerobj, xa, xb, ya, yb, _params); } /************************************************************************* This function sets nodes count for the 2D spline interpolant. Fitting is performed on the area defined with one of the "setarea" functions; this one sets the number of grid nodes placed upon that area. INPUT PARAMETERS: S - spline 2D builder object KX - nodes count for the first (X) dimension KY - nodes count for the second (Y) dimension -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildersetgrid(spline2dbuilder state, int kx, int ky) { spline2d.spline2dbuildersetgrid(state.innerobj, kx, ky, null); } public static void spline2dbuildersetgrid(spline2dbuilder state, int kx, int ky, alglib.xparams _params) { spline2d.spline2dbuildersetgrid(state.innerobj, kx, ky, _params); } /************************************************************************* This function allows you to choose least squares solver used to perform fitting. This function sets solver algorithm to "FastDDM", which is intended for really large-scale problems (it has good parallelism properties and scales to datasets in the 10M-100M points range). INPUT PARAMETERS: S - spline 2D builder object NLayers - number of layers in the model: * NLayers>=1 means that up to chosen number of bottom layers is fitted * NLayers=0 means that maximum number of layers is chosen (according to current grid size) * NLayers<=-1 means that up to |NLayers| topmost layers is skipped Recommendations: * good "default" value is 2 layers * you may need more layers, if your dataset is very irregular and you want to "patch" large holes. For a grid step H (equal to AreaWidth/GridSize) you may expect that last layer reproduces variations at distance H (and can patch holes that wide); higher layers operate at distances 2*H, 4*H, 8*H and so on. * good value for "bulletproof" mode is NLayers=0, which results in complete hierarchy of layers being generated. LambdaV - regularization coefficient, chosen in such a way that it penalizes bottom layers (fine details) first. LambdaV>=0, zero value means that no penalty is applied. -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildersetalgofastddm(spline2dbuilder state, int nlayers, double lambdav) { spline2d.spline2dbuildersetalgofastddm(state.innerobj, nlayers, lambdav, null); } public static void spline2dbuildersetalgofastddm(spline2dbuilder state, int nlayers, double lambdav, alglib.xparams _params) { spline2d.spline2dbuildersetalgofastddm(state.innerobj, nlayers, lambdav, _params); } /************************************************************************* This function allows you to choose least squares solver used to perform fitting. This function sets solver algorithm to "BlockLLS", which performs least squares fitting with fast sparse direct solver, with optional nonsmoothness penalty being applied. Nonlinearity penalty has the following form: P() ~ Lambda*integral[ (d2S/dx2)^2 + 2*(d2S/dxdy)^2 + (d2S/dy2)^2 ]dxdy here integral is calculated over entire grid, and "~" means "proportional" because integral is normalized after calculation. Extremely large values of Lambda result in linear fit being performed. NOTE: this algorithm is the most robust and controllable one, but it is limited by 512x512 grids and (say) up to 1.000.000 points. However, ALGLIB has one more spline solver: FastDDM algorithm, which is intended for really large-scale problems (in 10M-100M range). FastDDM algorithm also has better parallelism properties. More information on BlockLLS solver: * memory requirements: ~[32*K^3+256*NPoints] bytes for KxK grid with NPoints-sized dataset * serial running time: O(K^4+NPoints) * parallelism potential: limited. You may get some sublinear gain when working with large grids (K's in 256..512 range) ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: !
* high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: S - spline 2D builder object LambdaNS- non-negative value: * positive value means that some smoothing is applied * zero value means that no smoothing is applied, and corresponding entries of design matrix are numerically zero and dropped from consideration. -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildersetalgoblocklls(spline2dbuilder state, double lambdans) { spline2d.spline2dbuildersetalgoblocklls(state.innerobj, lambdans, null); } public static void spline2dbuildersetalgoblocklls(spline2dbuilder state, double lambdans, alglib.xparams _params) { spline2d.spline2dbuildersetalgoblocklls(state.innerobj, lambdans, _params); } /************************************************************************* This function allows you to choose least squares solver used to perform fitting. This function sets solver algorithm to "NaiveLLS". IMPORTANT: NaiveLLS is NOT intended to be used in real life code! This algorithm solves problem by generated dense (K^2)x(K^2+NPoints) matrix and solves linear least squares problem with dense solver. It is here just to test BlockLLS against reference solver (and maybe for someone trying to compare well optimized solver against straightforward approach to the LLS problem). More information on naive LLS solver: * memory requirements: ~[8*K^4+256*NPoints] bytes for KxK grid. * serial running time: O(K^6+NPoints) for KxK grid * when compared with BlockLLS, NaiveLLS has ~K larger memory demand and ~K^2 larger running time. INPUT PARAMETERS: S - spline 2D builder object LambdaNS- nonsmoothness penalty -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildersetalgonaivells(spline2dbuilder state, double lambdans) { spline2d.spline2dbuildersetalgonaivells(state.innerobj, lambdans, null); } public static void spline2dbuildersetalgonaivells(spline2dbuilder state, double lambdans, alglib.xparams _params) { spline2d.spline2dbuildersetalgonaivells(state.innerobj, lambdans, _params); } /************************************************************************* This function fits bicubic spline to current dataset, using current area/ grid and current LLS solver. ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: State - spline 2D builder object OUTPUT PARAMETERS: S - 2D spline, fit result Rep - fitting report, which provides some additional info about errors, R2 coefficient and so on. 
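As a usage note, switching between the solvers described above is a single call on the builder object before fitting; the penalty values below are illustrative only:

    // medium-scale problem: sparse direct BlockLLS solver with a mild nonsmoothness penalty
    alglib.spline2dbuildersetalgoblocklls(builder, 1.0E-3);
    // large-scale alternative: FastDDM with 2 bottom layers fitted and no extra regularization
    // alglib.spline2dbuildersetalgofastddm(builder, 2, 0.0);
    alglib.spline2dinterpolant s;
    alglib.spline2dfitreport rep;
    alglib.spline2dfit(builder, out s, out rep);        // then fit as usual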
-- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dfit(spline2dbuilder state, out spline2dinterpolant s, out spline2dfitreport rep) { s = new spline2dinterpolant(); rep = new spline2dfitreport(); spline2d.spline2dfit(state.innerobj, s.innerobj, rep.innerobj, null); } public static void spline2dfit(spline2dbuilder state, out spline2dinterpolant s, out spline2dfitreport rep, alglib.xparams _params) { s = new spline2dinterpolant(); rep = new spline2dfitreport(); spline2d.spline2dfit(state.innerobj, s.innerobj, rep.innerobj, _params); } } public partial class alglib { } public partial class alglib { /************************************************************************* Buffer object which is used to perform evaluation requests in the multithreaded mode (multiple threads working with same RBF model object). This object should be created with rbfcreatecalcbuffer(). *************************************************************************/ public class rbfcalcbuffer : alglibobject { // // Public declarations // public rbfcalcbuffer() { _innerobj = new rbf.rbfcalcbuffer(); } public override alglib.alglibobject make_copy() { return new rbfcalcbuffer((rbf.rbfcalcbuffer)_innerobj.make_copy()); } // // Although some of declarations below are public, you should not use them // They are intended for internal use only // private rbf.rbfcalcbuffer _innerobj; public rbf.rbfcalcbuffer innerobj { get { return _innerobj; } } public rbfcalcbuffer(rbf.rbfcalcbuffer obj) { _innerobj = obj; } } /************************************************************************* RBF model. Never try to directly work with fields of this object - always use ALGLIB functions to work with this object. *************************************************************************/ public class rbfmodel : alglibobject { // // Public declarations // public rbfmodel() { _innerobj = new rbf.rbfmodel(); } public override alglib.alglibobject make_copy() { return new rbfmodel((rbf.rbfmodel)_innerobj.make_copy()); } // // Although some of declarations below are public, you should not use them // They are intended for internal use only // private rbf.rbfmodel _innerobj; public rbf.rbfmodel innerobj { get { return _innerobj; } } public rbfmodel(rbf.rbfmodel obj) { _innerobj = obj; } } /************************************************************************* RBF solution report: * TerminationType - termination type, positive values - success, non-positive - failure.
Fields which are set by modern RBF solvers (hierarchical): * RMSError - root-mean-square error; NAN for old solvers (ML, QNN) * MaxError - maximum error; NAN for old solvers (ML, QNN) *************************************************************************/ public class rbfreport : alglibobject { // // Public declarations // public double rmserror { get { return _innerobj.rmserror; } set { _innerobj.rmserror = value; } } public double maxerror { get { return _innerobj.maxerror; } set { _innerobj.maxerror = value; } } public int arows { get { return _innerobj.arows; } set { _innerobj.arows = value; } } public int acols { get { return _innerobj.acols; } set { _innerobj.acols = value; } } public int annz { get { return _innerobj.annz; } set { _innerobj.annz = value; } } public int iterationscount { get { return _innerobj.iterationscount; } set { _innerobj.iterationscount = value; } } public int nmv { get { return _innerobj.nmv; } set { _innerobj.nmv = value; } } public int terminationtype { get { return _innerobj.terminationtype; } set { _innerobj.terminationtype = value; } } public rbfreport() { _innerobj = new rbf.rbfreport(); } public override alglib.alglibobject make_copy() { return new rbfreport((rbf.rbfreport)_innerobj.make_copy()); } // // Although some of declarations below are public, you should not use them // They are intended for internal use only // private rbf.rbfreport _innerobj; public rbf.rbfreport innerobj { get { return _innerobj; } } public rbfreport(rbf.rbfreport obj) { _innerobj = obj; } } /************************************************************************* This function serializes data structure to string. Important properties of s_out: * it contains alphanumeric characters, dots, underscores, minus signs * these symbols are grouped into words, which are separated by spaces and Windows-style (CR+LF) newlines * although serializer uses spaces and CR+LF as separators, you can replace any separator character by arbitrary combination of spaces, tabs, Windows or Unix newlines. It allows flexible reformatting of the string in case you want to include it into text or XML file. But you should not insert separators into the middle of the "words" nor you should change case of letters. * s_out can be freely moved between 32-bit and 64-bit systems, little and big endian machines, and so on. You can serialize structure on 32-bit machine and unserialize it on 64-bit one (or vice versa), or serialize it on SPARC and unserialize on x86. You can also serialize it in C# version of ALGLIB and unserialize in C++ one, and vice versa. *************************************************************************/ public static void rbfserialize(rbfmodel obj, out string s_out) { alglib.serializer s = new alglib.serializer(); s.alloc_start(); rbf.rbfalloc(s, obj.innerobj, null); s.sstart_str(); rbf.rbfserialize(s, obj.innerobj, null); s.stop(); s_out = s.get_string(); } /************************************************************************* This function unserializes data structure from string. *************************************************************************/ public static void rbfunserialize(string s_in, out rbfmodel obj) { alglib.serializer s = new alglib.serializer(); obj = new rbfmodel(); s.ustart_str(s_in); rbf.rbfunserialize(s, obj.innerobj, null); s.stop(); } /************************************************************************* This function serializes data structure to stream. 
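For example, a previously constructed model can be round-tripped through the string serializer above as follows (illustrative sketch; "model" stands for any rbfmodel that was already built):

    string blob;
    alglib.rbfserialize(model, out blob);        // produce portable text representation
    alglib.rbfmodel restored;
    alglib.rbfunserialize(blob, out restored);   // reconstruct an equivalent model
    // the stream-based overloads work the same way, but write to / read from a System.IO.Stream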
Data stream generated by this function is same as string representation generated by string version of serializer - alphanumeric characters, dots, underscores, minus signs, which are grouped into words separated by spaces and CR+LF. We recommend you to read comments on string version of serializer to find out more about serialization of AlGLIB objects. *************************************************************************/ public static void rbfserialize(rbfmodel obj, System.IO.Stream stream_out) { alglib.serializer s = new alglib.serializer(); s.alloc_start(); rbf.rbfalloc(s, obj.innerobj, null); s.sstart_stream(stream_out); rbf.rbfserialize(s, obj.innerobj, null); s.stop(); } /************************************************************************* This function unserializes data structure from stream. *************************************************************************/ public static void rbfunserialize(System.IO.Stream stream_in, out rbfmodel obj) { alglib.serializer s = new alglib.serializer(); obj = new rbfmodel(); s.ustart_stream(stream_in); rbf.rbfunserialize(s, obj.innerobj, null); s.stop(); } /************************************************************************* This function creates RBF model for a scalar (NY=1) or vector (NY>1) function in a NX-dimensional space (NX>=1). Newly created model is empty. It can be used for interpolation right after creation, but it just returns zeros. You have to add points to the model, tune interpolation settings, and then call model construction function rbfbuildmodel() which will update model according to your specification. USAGE: 1. User creates model with rbfcreate() 2. User adds dataset with rbfsetpoints() (points do NOT have to be on a regular grid) or rbfsetpointsandscales(). 3. (OPTIONAL) User chooses polynomial term by calling: * rbflinterm() to set linear term * rbfconstterm() to set constant term * rbfzeroterm() to set zero term By default, linear term is used. 4. User tweaks algorithm properties with rbfsetalgohierarchical() method (or chooses one of the legacy algorithms - QNN (rbfsetalgoqnn) or ML (rbfsetalgomultilayer)). 5. User calls rbfbuildmodel() function which rebuilds model according to the specification 6. User may call rbfcalc() to calculate model value at the specified point, rbfgridcalc() to calculate model values at the points of the regular grid. User may extract model coefficients with rbfunpack() call. IMPORTANT: we recommend you to use latest model construction algorithm - hierarchical RBFs, which is activated by rbfsetalgohierarchical() function. This algorithm is the fastest one, and most memory- efficient. However, it is incompatible with older versions of ALGLIB (pre-3.11). So, if you serialize hierarchical model, you will be unable to load it in pre-3.11 ALGLIB. Other model types (QNN and RBF-ML) are still backward-compatible. INPUT PARAMETERS: NX - dimension of the space, NX>=1 NY - function dimension, NY>=1 OUTPUT PARAMETERS: S - RBF model (initially equals to zero) NOTE 1: memory requirements. RBF models require amount of memory which is proportional to the number of data points. Some additional memory is allocated during model construction, but most of this memory is freed after model coefficients are calculated. Amount of this additional memory depends on model construction algorithm being used. NOTE 2: prior to ALGLIB version 3.11, RBF models supported only NX=2 or NX=3. Any attempt to create single-dimensional or more than 3-dimensional RBF model resulted in exception. 
ALGLIB 3.11 supports any NX>0, but models created with NX!=2 and NX!=3 are incompatible with (a) older versions of ALGLIB, (b) old model construction algorithms (QNN or RBF-ML). So, if you create a model with NX=2 or NX=3, then, depending on specific model construction algorithm being chosen, you will (QNN and RBF-ML) or will not (HierarchicalRBF) get backward compatibility with older versions of ALGLIB. You have a choice here. However, if you create a model with NX neither 2 nor 3, you have no backward compatibility from the start, and you are forced to use hierarchical RBFs and ALGLIB 3.11 or later. -- ALGLIB -- Copyright 13.12.2011, 20.06.2016 by Bochkanov Sergey *************************************************************************/ public static void rbfcreate(int nx, int ny, out rbfmodel s) { s = new rbfmodel(); rbf.rbfcreate(nx, ny, s.innerobj, null); } public static void rbfcreate(int nx, int ny, out rbfmodel s, alglib.xparams _params) { s = new rbfmodel(); rbf.rbfcreate(nx, ny, s.innerobj, _params); } /************************************************************************* This function creates buffer structure which can be used to perform parallel RBF model evaluations (with one RBF model instance being used from multiple threads, as long as different threads use different instances of buffer). This buffer object can be used with rbftscalcbuf() function (here "ts" stands for "thread-safe", "buf" is a suffix which denotes function which reuses previously allocated output space). How to use it: * create RBF model structure with rbfcreate() * load data, tune parameters * call rbfbuildmodel() * call rbfcreatecalcbuffer(), once per thread working with RBF model (you should call this function only AFTER call to rbfbuildmodel(), see below for more information) * call rbftscalcbuf() from different threads, with each thread working with its own copy of buffer object. INPUT PARAMETERS S - RBF model OUTPUT PARAMETERS Buf - external buffer. IMPORTANT: buffer object should be used only with RBF model object which was used to initialize buffer. Any attempt to use buffer with different object is dangerous - you may get memory violation error because sizes of internal arrays do not fit to dimensions of RBF structure. IMPORTANT: you should call this function only for model which was built with rbfbuildmodel() function, after successful invocation of rbfbuildmodel(). Sizes of some internal structures are determined only after model is built, so buffer object created before model construction stage will be useless (and any attempt to use it will result in exception). -- ALGLIB -- Copyright 02.04.2016 by Sergey Bochkanov *************************************************************************/ public static void rbfcreatecalcbuffer(rbfmodel s, out rbfcalcbuffer buf) { buf = new rbfcalcbuffer(); rbf.rbfcreatecalcbuffer(s.innerobj, buf.innerobj, null); } public static void rbfcreatecalcbuffer(rbfmodel s, out rbfcalcbuffer buf, alglib.xparams _params) { buf = new rbfcalcbuffer(); rbf.rbfcreatecalcbuffer(s.innerobj, buf.innerobj, _params); } /************************************************************************* This function adds dataset. This function overrides results of the previous calls, i.e. multiple calls of this function will result in only the last set being added. IMPORTANT: ALGLIB version 3.11 and later allows you to specify a set of per-dimension scales. Interpolation radii are multiplied by the scale vector. 
It may be useful if you have mixed spatio-temporal data (say, a set of 3D slices recorded at different times). You should call rbfsetpointsandscales() function to use this feature. INPUT PARAMETERS: S - RBF model, initialized by rbfcreate() call. XY - points, array[N,NX+NY]. One row corresponds to one point in the dataset. First NX elements are coordinates, next NY elements are function values. Array may be larger than specified, in this case only leading [N,NX+NY] elements will be used. N - number of points in the dataset After you've added dataset and (optionally) tuned algorithm settings you should call rbfbuildmodel() in order to build a model for you. NOTE: dataset added by this function is not saved during model serialization. MODEL ITSELF is serialized, but data used to build it are not. So, if you 1) add dataset to empty RBF model, 2) serialize and unserialize it, then you will get an empty RBF model with no dataset being attached. From the other side, if you call rbfbuildmodel() between (1) and (2), then after (2) you will get your fully constructed RBF model - but again with no dataset attached, so subsequent calls to rbfbuildmodel() will produce empty model. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfsetpoints(rbfmodel s, double[,] xy, int n) { rbf.rbfsetpoints(s.innerobj, xy, n, null); } public static void rbfsetpoints(rbfmodel s, double[,] xy, int n, alglib.xparams _params) { rbf.rbfsetpoints(s.innerobj, xy, n, _params); } public static void rbfsetpoints(rbfmodel s, double[,] xy) { int n; n = ap.rows(xy); rbf.rbfsetpoints(s.innerobj, xy, n, null); return; } public static void rbfsetpoints(rbfmodel s, double[,] xy, alglib.xparams _params) { int n; n = ap.rows(xy); rbf.rbfsetpoints(s.innerobj, xy, n, _params); return; } /************************************************************************* This function adds dataset and a vector of per-dimension scales. It may be useful if you have mixed spatio-temporal data - say, a set of 3D slices recorded at different times. Such data typically require different RBF radii for spatial and temporal dimensions. ALGLIB solves this problem by specifying single RBF radius, which is (optionally) multiplied by the scale vector. This function overrides results of the previous calls, i.e. multiple calls of this function will result in only the last set being added. IMPORTANT: only HierarchicalRBF algorithm can work with scaled points. So, using this function results in RBF models which can be used in ALGLIB 3.11 or later. Previous versions of the library will be unable to unserialize models produced by HierarchicalRBF algo. Any attempt to use this function with RBF-ML or QNN algorithms will result in -3 error code being returned (incorrect algorithm). INPUT PARAMETERS: R - RBF model, initialized by rbfcreate() call. XY - points, array[N,NX+NY]. One row corresponds to one point in the dataset. First NX elements are coordinates, next NY elements are function values. Array may be larger than specified, in this case only leading [N,NX+NY] elements will be used. N - number of points in the dataset S - array[NX], scale vector, S[i]>0. After you've added dataset and (optionally) tuned algorithm settings you should call rbfbuildmodel() in order to build a model for you. NOTE: dataset added by this function is not saved during model serialization. MODEL ITSELF is serialized, but data used to build it are not. 
So, if you 1) add dataset to empty RBF model, 2) serialize and unserialize it, then you will get an empty RBF model with no dataset being attached. From the other side, if you call rbfbuildmodel() between (1) and (2), then after (2) you will get your fully constructed RBF model - but again with no dataset attached, so subsequent calls to rbfbuildmodel() will produce empty model. -- ALGLIB -- Copyright 20.06.2016 by Bochkanov Sergey *************************************************************************/ public static void rbfsetpointsandscales(rbfmodel r, double[,] xy, int n, double[] s) { rbf.rbfsetpointsandscales(r.innerobj, xy, n, s, null); } public static void rbfsetpointsandscales(rbfmodel r, double[,] xy, int n, double[] s, alglib.xparams _params) { rbf.rbfsetpointsandscales(r.innerobj, xy, n, s, _params); } public static void rbfsetpointsandscales(rbfmodel r, double[,] xy, double[] s) { int n; n = ap.rows(xy); rbf.rbfsetpointsandscales(r.innerobj, xy, n, s, null); return; } public static void rbfsetpointsandscales(rbfmodel r, double[,] xy, double[] s, alglib.xparams _params) { int n; n = ap.rows(xy); rbf.rbfsetpointsandscales(r.innerobj, xy, n, s, _params); return; } /************************************************************************* DEPRECATED:since version 3.11 ALGLIB includes new RBF model construction algorithm, Hierarchical RBF. This algorithm is faster and requires less memory than QNN and RBF-ML. It is especially good for large-scale interpolation problems. So, we recommend you to consider Hierarchical RBF as default option. ========================================================================== This function sets RBF interpolation algorithm. ALGLIB supports several RBF algorithms with different properties. This algorithm is called RBF-QNN and it is good for point sets with following properties: a) all points are distinct b) all points are well separated. c) points distribution is approximately uniform. There is no "contour lines", clusters of points, or other small-scale structures. Algorithm description: 1) interpolation centers are allocated to data points 2) interpolation radii are calculated as distances to the nearest centers times Q coefficient (where Q is a value from [0.75,1.50]). 3) after performing (2) radii are transformed in order to avoid situation when single outlier has very large radius and influences many points across all dataset. Transformation has following form: new_r[i] = min(r[i],Z*median(r[])) where r[i] is I-th radius, median() is a median radius across entire dataset, Z is user-specified value which controls amount of deviation from median radius. When (a) is violated, we will be unable to build RBF model. When (b) or (c) are violated, model will be built, but interpolation quality will be low. See http://www.alglib.net/interpolation/ for more information on this subject. This algorithm is used by default. Additional Q parameter controls smoothness properties of the RBF basis: * Q<0.75 will give perfectly conditioned basis, but terrible smoothness properties (RBF interpolant will have sharp peaks around function values) * Q around 1.0 gives good balance between smoothness and condition number * Q>1.5 will lead to badly conditioned systems and slow convergence of the underlying linear solver (although smoothness will be very good) * Q>2.0 will effectively make optimizer useless because it won't converge within reasonable amount of iterations. It is possible to set such large Q, but it is advised not to do so. 
INPUT PARAMETERS: S - RBF model, initialized by RBFCreate() call Q - Q parameter, Q>0, recommended value - 1.0 Z - Z parameter, Z>0, recommended value - 5.0 NOTE: this function has some serialization-related subtleties. We recommend you to study serialization examples from ALGLIB Reference Manual if you want to perform serialization of your models. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfsetalgoqnn(rbfmodel s, double q, double z) { rbf.rbfsetalgoqnn(s.innerobj, q, z, null); } public static void rbfsetalgoqnn(rbfmodel s, double q, double z, alglib.xparams _params) { rbf.rbfsetalgoqnn(s.innerobj, q, z, _params); } public static void rbfsetalgoqnn(rbfmodel s) { double q; double z; q = 1.0; z = 5.0; rbf.rbfsetalgoqnn(s.innerobj, q, z, null); return; } public static void rbfsetalgoqnn(rbfmodel s, alglib.xparams _params) { double q; double z; q = 1.0; z = 5.0; rbf.rbfsetalgoqnn(s.innerobj, q, z, _params); return; } /************************************************************************* DEPRECATED:since version 3.11 ALGLIB includes new RBF model construction algorithm, Hierarchical RBF. This algorithm is faster and requires less memory than QNN and RBF-ML. It is especially good for large-scale interpolation problems. So, we recommend you to consider Hierarchical RBF as default option. ========================================================================== This function sets RBF interpolation algorithm. ALGLIB supports several RBF algorithms with different properties. This algorithm is called RBF-ML. It builds multilayer RBF model, i.e. model with subsequently decreasing radii, which allows us to combine smoothness (due to large radii of the first layers) with exactness (due to small radii of the last layers) and fast convergence. Internally RBF-ML uses many different means of acceleration, from sparse matrices to KD-trees, which results in algorithm whose working time is roughly proportional to N*log(N)*Density*RBase^2*NLayers, where N is a number of points, Density is an average density if points per unit of the interpolation space, RBase is an initial radius, NLayers is a number of layers. RBF-ML is good for following kinds of interpolation problems: 1. "exact" problems (perfect fit) with well separated points 2. least squares problems with arbitrary distribution of points (algorithm gives perfect fit where it is possible, and resorts to least squares fit in the hard areas). 3. noisy problems where we want to apply some controlled amount of smoothing. INPUT PARAMETERS: S - RBF model, initialized by RBFCreate() call RBase - RBase parameter, RBase>0 NLayers - NLayers parameter, NLayers>0, recommended value to start with - about 5. LambdaV - regularization value, can be useful when solving problem in the least squares sense. Optimal lambda is problem- dependent and require trial and error. In our experience, good lambda can be as large as 0.1, and you can use 0.001 as initial guess. Default value - 0.01, which is used when LambdaV is not given. You can specify zero value, but it is not recommended to do so. TUNING ALGORITHM In order to use this algorithm you have to choose three parameters: * initial radius RBase * number of layers in the model NLayers * regularization coefficient LambdaV Initial radius is easy to choose - you can pick any number several times larger than the average distance between points. 
Algorithm won't break down if you choose radius which is too large (model construction time will increase, but model will be built correctly). Choose such number of layers that RLast=RBase/2^(NLayers-1) (radius used by the last layer) will be smaller than the typical distance between points. In case model error is too large, you can increase number of layers. Having more layers will make model construction and evaluation proportionally slower, but it will allow you to have model which precisely fits your data. From the other side, if you want to suppress noise, you can DECREASE number of layers to make your model less flexible. Regularization coefficient LambdaV controls smoothness of the individual models built for each layer. We recommend you to use default value in case you don't want to tune this parameter, because having non-zero LambdaV accelerates and stabilizes internal iterative algorithm. In case you want to suppress noise you can use LambdaV as additional parameter (larger value = more smoothness) to tune. TYPICAL ERRORS 1. Using initial radius which is too large. Memory requirements of the RBF-ML are roughly proportional to N*Density*RBase^2 (where Density is an average density of points per unit of the interpolation space). In the extreme case of the very large RBase we will need O(N^2) units of memory - and many layers in order to decrease radius to some reasonably small value. 2. Using too small number of layers - RBF models with large radius are not flexible enough to reproduce small variations in the target function. You need many layers with different radii, from large to small, in order to have good model. 3. Using initial radius which is too small. You will get model with "holes" in the areas which are too far away from interpolation centers. However, algorithm will work correctly (and quickly) in this case. 4. Using too many layers - you will get too large and too slow model. This model will perfectly reproduce your function, but maybe you will be able to achieve similar results with less layers (and less memory). -- ALGLIB -- Copyright 02.03.2012 by Bochkanov Sergey *************************************************************************/ public static void rbfsetalgomultilayer(rbfmodel s, double rbase, int nlayers, double lambdav) { rbf.rbfsetalgomultilayer(s.innerobj, rbase, nlayers, lambdav, null); } public static void rbfsetalgomultilayer(rbfmodel s, double rbase, int nlayers, double lambdav, alglib.xparams _params) { rbf.rbfsetalgomultilayer(s.innerobj, rbase, nlayers, lambdav, _params); } public static void rbfsetalgomultilayer(rbfmodel s, double rbase, int nlayers) { double lambdav; lambdav = 0.01; rbf.rbfsetalgomultilayer(s.innerobj, rbase, nlayers, lambdav, null); return; } public static void rbfsetalgomultilayer(rbfmodel s, double rbase, int nlayers, alglib.xparams _params) { double lambdav; lambdav = 0.01; rbf.rbfsetalgomultilayer(s.innerobj, rbase, nlayers, lambdav, _params); return; } /************************************************************************* This function sets RBF interpolation algorithm. ALGLIB supports several RBF algorithms with different properties. This algorithm is called Hierarchical RBF. It similar to its previous incarnation, RBF-ML, i.e. it also builds a sequence of models with decreasing radii. However, it uses more economical way of building upper layers (ones with large radii), which results in faster model construction and evaluation, as well as smaller memory footprint during construction. 
This algorithm has following important features: * ability to handle millions of points * controllable smoothing via nonlinearity penalization * support for NX-dimensional models with NX=1 or NX>3 (unlike QNN or RBF-ML) * support for specification of per-dimensional radii via scale vector, which is set by means of rbfsetpointsandscales() function. This feature is useful if you solve spatio-temporal interpolation problems, where different radii are required for spatial and temporal dimensions. Running times are roughly proportional to: * N*log(N)*NLayers - for model construction * N*NLayers - for model evaluation You may see that running time does not depend on search radius or points density, just on number of layers in the hierarchy. IMPORTANT: this model construction algorithm was introduced in ALGLIB 3.11 and produces models which are INCOMPATIBLE with previous versions of ALGLIB. You can not unserialize models produced with this function in ALGLIB 3.10 or earlier. INPUT PARAMETERS: S - RBF model, initialized by rbfcreate() call RBase - RBase parameter, RBase>0 NLayers - NLayers parameter, NLayers>0, recommended value to start with - about 5. LambdaNS- >=0, nonlinearity penalty coefficient, negative values are not allowed. This parameter adds controllable smoothing to the problem, which may reduce noise. Specification of non- zero lambda means that in addition to fitting error solver will also minimize LambdaNS*|S''(x)|^2 (appropriately generalized to multiple dimensions. Specification of exactly zero value means that no penalty is added (we do not even evaluate matrix of second derivatives which is necessary for smoothing). Calculation of nonlinearity penalty is costly - it results in several-fold increase of model construction time. Evaluation time remains the same. Optimal lambda is problem-dependent and requires trial and error. Good value to start from is 1e-5...1e-6, which corresponds to slightly noticeable smoothing of the function. Value 1e-2 usually means that quite heavy smoothing is applied. TUNING ALGORITHM In order to use this algorithm you have to choose three parameters: * initial radius RBase * number of layers in the model NLayers * penalty coefficient LambdaNS Initial radius is easy to choose - you can pick any number several times larger than the average distance between points. Algorithm won't break down if you choose radius which is too large (model construction time will increase, but model will be built correctly). Choose such number of layers that RLast=RBase/2^(NLayers-1) (radius used by the last layer) will be smaller than the typical distance between points. In case model error is too large, you can increase number of layers. Having more layers will make model construction and evaluation proportionally slower, but it will allow you to have model which precisely fits your data. From the other side, if you want to suppress noise, you can DECREASE number of layers to make your model less flexible (or specify non-zero LambdaNS). TYPICAL ERRORS 1. Using too small number of layers - RBF models with large radius are not flexible enough to reproduce small variations in the target function. You need many layers with different radii, from large to small, in order to have good model. 2. Using initial radius which is too small. You will get model with "holes" in the areas which are too far away from interpolation centers. However, algorithm will work correctly (and quickly) in this case. 
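As a worked instance of the tuning rules above (the numbers are hypothetical): if the average spacing between points is about 1.0, then RBase=4.0 is "several times larger", and with NLayers=5 the last layer has radius RBase/2^(NLayers-1) = 4.0/16 = 0.25, which is below the typical point-to-point distance:

    alglib.rbfsetalgohierarchical(model, 4.0, 5, 0.0);   // LambdaNS=0: no smoothness penalty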
-- ALGLIB -- Copyright 20.06.2016 by Bochkanov Sergey *************************************************************************/ public static void rbfsetalgohierarchical(rbfmodel s, double rbase, int nlayers, double lambdans) { rbf.rbfsetalgohierarchical(s.innerobj, rbase, nlayers, lambdans, null); } public static void rbfsetalgohierarchical(rbfmodel s, double rbase, int nlayers, double lambdans, alglib.xparams _params) { rbf.rbfsetalgohierarchical(s.innerobj, rbase, nlayers, lambdans, _params); } /************************************************************************* This function sets linear term (model is a sum of radial basis functions plus linear polynomial). This function won't have effect until next call to RBFBuildModel(). INPUT PARAMETERS: S - RBF model, initialized by RBFCreate() call NOTE: this function has some serialization-related subtleties. We recommend you to study serialization examples from ALGLIB Reference Manual if you want to perform serialization of your models. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfsetlinterm(rbfmodel s) { rbf.rbfsetlinterm(s.innerobj, null); } public static void rbfsetlinterm(rbfmodel s, alglib.xparams _params) { rbf.rbfsetlinterm(s.innerobj, _params); } /************************************************************************* This function sets constant term (model is a sum of radial basis functions plus constant). This function won't have effect until next call to RBFBuildModel(). INPUT PARAMETERS: S - RBF model, initialized by RBFCreate() call NOTE: this function has some serialization-related subtleties. We recommend you to study serialization examples from ALGLIB Reference Manual if you want to perform serialization of your models. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfsetconstterm(rbfmodel s) { rbf.rbfsetconstterm(s.innerobj, null); } public static void rbfsetconstterm(rbfmodel s, alglib.xparams _params) { rbf.rbfsetconstterm(s.innerobj, _params); } /************************************************************************* This function sets zero term (model is a sum of radial basis functions without polynomial term). This function won't have effect until next call to RBFBuildModel(). INPUT PARAMETERS: S - RBF model, initialized by RBFCreate() call NOTE: this function has some serialization-related subtleties. We recommend you to study serialization examples from ALGLIB Reference Manual if you want to perform serialization of your models. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfsetzeroterm(rbfmodel s) { rbf.rbfsetzeroterm(s.innerobj, null); } public static void rbfsetzeroterm(rbfmodel s, alglib.xparams _params) { rbf.rbfsetzeroterm(s.innerobj, _params); } /************************************************************************* This function sets basis function type, which can be: * 0 for classic Gaussian * 1 for fast and compact bell-like basis function, which becomes exactly zero at distance equal to 3*R (default option). 
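Illustrative calls for this setting and for the two related version-2 tuning functions declared below (parameter meanings and recommended values are documented in their respective descriptions):

    alglib.rbfsetv2bf(model, 0);             // 0 = classic Gaussian, 1 = compact basis (default)
    alglib.rbfsetv2its(model, 400);          // iteration limit of the underlying linear solver (see below)
    alglib.rbfsetv2supportr(model, 0.1);     // support radius coefficient (see below)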
INPUT PARAMETERS: S - RBF model, initialized by RBFCreate() call BF - basis function type: * 0 - classic Gaussian * 1 - fast and compact one -- ALGLIB -- Copyright 01.02.2017 by Bochkanov Sergey *************************************************************************/ public static void rbfsetv2bf(rbfmodel s, int bf) { rbf.rbfsetv2bf(s.innerobj, bf, null); } public static void rbfsetv2bf(rbfmodel s, int bf, alglib.xparams _params) { rbf.rbfsetv2bf(s.innerobj, bf, _params); } /************************************************************************* This function sets stopping criteria of the underlying linear solver for hierarchical (version 2) RBF constructor. INPUT PARAMETERS: S - RBF model, initialized by RBFCreate() call MaxIts - this criterion will stop algorithm after MaxIts iterations. Typically a few hundreds iterations is required, with 400 being a good default value to start experimentation. Zero value means that default value will be selected. -- ALGLIB -- Copyright 01.02.2017 by Bochkanov Sergey *************************************************************************/ public static void rbfsetv2its(rbfmodel s, int maxits) { rbf.rbfsetv2its(s.innerobj, maxits, null); } public static void rbfsetv2its(rbfmodel s, int maxits, alglib.xparams _params) { rbf.rbfsetv2its(s.innerobj, maxits, _params); } /************************************************************************* This function sets support radius parameter of hierarchical (version 2) RBF constructor. Hierarchical RBF model achieves great speed-up by removing from the model excessive (too dense) nodes. Say, if you have RBF radius equal to 1 meter, and two nodes are just 1 millimeter apart, you may remove one of them without reducing model quality. Support radius parameter is used to justify which points need removal, and which do not. If two points are less than SUPPORT_R*CUR_RADIUS units of distance apart, one of them is removed from the model. The larger support radius is, the faster model construction AND evaluation are. However, too large values result in "bumpy" models. INPUT PARAMETERS: S - RBF model, initialized by RBFCreate() call R - support radius coefficient, >=0. Recommended values are [0.1,0.4] range, with 0.1 being default value. -- ALGLIB -- Copyright 01.02.2017 by Bochkanov Sergey *************************************************************************/ public static void rbfsetv2supportr(rbfmodel s, double r) { rbf.rbfsetv2supportr(s.innerobj, r, null); } public static void rbfsetv2supportr(rbfmodel s, double r, alglib.xparams _params) { rbf.rbfsetv2supportr(s.innerobj, r, _params); } /************************************************************************* This function builds RBF model and returns report (contains some information which can be used for evaluation of the algorithm properties). Call to this function modifies RBF model by calculating its centers/radii/ weights and saving them into RBFModel structure. Initially RBFModel contain zero coefficients, but after call to this function we will have coefficients which were calculated in order to fit our dataset. After you called this function you can call RBFCalc(), RBFGridCalc() and other model calculation functions. INPUT PARAMETERS: S - RBF model, initialized by RBFCreate() call Rep - report: * Rep.TerminationType: * -5 - non-distinct basis function centers were detected, interpolation aborted; only QNN returns this error code, other algorithms can handle non- distinct nodes. 
* -4 - nonconvergence of the internal SVD solver * -3 - incorrect model construction algorithm was chosen: QNN or RBF-ML, combined with one of the incompatible features - NX=1 or NX>3; points with per-dimension scales. * 1 - successful termination * 8 - a termination request was submitted via rbfrequesttermination() function. Fields which are set only by modern RBF solvers (hierarchical or nonnegative; older solvers like QNN and ML initialize these fields by NANs): * rep.rmserror - root-mean-square error at nodes * rep.maxerror - maximum error at nodes Fields which are used for debugging purposes: * Rep.IterationsCount - iterations count of the LSQR solver * Rep.NMV - number of matrix-vector products * Rep.ARows - rows count for the system matrix * Rep.ACols - columns count for the system matrix * Rep.ANNZ - number of significantly non-zero elements (elements above some algorithm-determined threshold) NOTE: failure to build model will leave current state of the structure unchanged. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfbuildmodel(rbfmodel s, out rbfreport rep) { rep = new rbfreport(); rbf.rbfbuildmodel(s.innerobj, rep.innerobj, null); } public static void rbfbuildmodel(rbfmodel s, out rbfreport rep, alglib.xparams _params) { rep = new rbfreport(); rbf.rbfbuildmodel(s.innerobj, rep.innerobj, _params); } /************************************************************************* This function calculates values of the RBF model in the given point. IMPORTANT: this function works only with modern (hierarchical) RBFs. It can not be used with legacy (version 1) RBFs because older RBF code does not support 1-dimensional models. This function should be used when we have NY=1 (scalar function) and NX=1 (1-dimensional space). If you have 3-dimensional space, use rbfcalc3(). If you have 2-dimensional space, use rbfcalc2(). If you have general situation (NX-dimensional space, NY-dimensional function) you should use generic rbfcalc(). If you want to perform parallel model evaluation from multiple threads, use rbftscalcbuf() with per-thread buffer object. This function returns 0.0 when: * model is not initialized * NX<>1 * NY<>1 INPUT PARAMETERS: S - RBF model X0 - X-coordinate, finite number RESULT: value of the model or 0.0 (as defined above) -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static double rbfcalc1(rbfmodel s, double x0) { return rbf.rbfcalc1(s.innerobj, x0, null); } public static double rbfcalc1(rbfmodel s, double x0, alglib.xparams _params) { return rbf.rbfcalc1(s.innerobj, x0, _params); } /************************************************************************* This function calculates values of the RBF model in the given point. This function should be used when we have NY=1 (scalar function) and NX=2 (2-dimensional space). If you have 3-dimensional space, use rbfcalc3(). If you have general situation (NX-dimensional space, NY-dimensional function) you should use generic rbfcalc(). If you want to calculate function values many times, consider using rbfgridcalc2v(), which is far more efficient than many subsequent calls to rbfcalc2(). If you want to perform parallel model evaluation from multiple threads, use rbftscalcbuf() with per-thread buffer object.
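For example, a complete build-and-evaluate sequence for a 2-dimensional scalar model looks as follows (illustrative sketch with arbitrary data; rbfcalc2() is the function declared just below):

    // 2-dimensional scalar dataset: each row is (x0, x1, f)
    double[,] xy = new double[,]{{0,0,1},{1,0,2},{0,1,3},{1,1,4}};
    alglib.rbfmodel model;
    alglib.rbfcreate(2, 1, out model);                    // NX=2, NY=1
    alglib.rbfsetpoints(model, xy);                       // N is inferred from the number of rows
    alglib.rbfsetalgohierarchical(model, 2.0, 5, 0.0);    // see the tuning notes above
    alglib.rbfreport rep;
    alglib.rbfbuildmodel(model, out rep);
    double v = 0;
    if( rep.terminationtype>0 )
        v = alglib.rbfcalc2(model, 0.5, 0.5);             // evaluate the model at (0.5,0.5)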
This function returns 0.0 when: * model is not initialized * NX<>2 *NY<>1 INPUT PARAMETERS: S - RBF model X0 - first coordinate, finite number X1 - second coordinate, finite number RESULT: value of the model or 0.0 (as defined above) -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static double rbfcalc2(rbfmodel s, double x0, double x1) { return rbf.rbfcalc2(s.innerobj, x0, x1, null); } public static double rbfcalc2(rbfmodel s, double x0, double x1, alglib.xparams _params) { return rbf.rbfcalc2(s.innerobj, x0, x1, _params); } /************************************************************************* This function calculates value of the RBF model in the given point. This function should be used when we have NY=1 (scalar function) and NX=3 (3-dimensional space). If you have 2-dimensional space, use rbfcalc2(). If you have general situation (NX-dimensional space, NY-dimensional function) you should use generic rbfcalc(). If you want to calculate function values many times, consider using rbfgridcalc3v(), which is far more efficient than many subsequent calls to rbfcalc3(). If you want to perform parallel model evaluation from multiple threads, use rbftscalcbuf() with per-thread buffer object. This function returns 0.0 when: * model is not initialized * NX<>3 *NY<>1 INPUT PARAMETERS: S - RBF model X0 - first coordinate, finite number X1 - second coordinate, finite number X2 - third coordinate, finite number RESULT: value of the model or 0.0 (as defined above) -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static double rbfcalc3(rbfmodel s, double x0, double x1, double x2) { return rbf.rbfcalc3(s.innerobj, x0, x1, x2, null); } public static double rbfcalc3(rbfmodel s, double x0, double x1, double x2, alglib.xparams _params) { return rbf.rbfcalc3(s.innerobj, x0, x1, x2, _params); } /************************************************************************* This function calculates values of the RBF model at the given point. This is general function which can be used for arbitrary NX (dimension of the space of arguments) and NY (dimension of the function itself). However when you have NY=1 you may find more convenient to use rbfcalc2() or rbfcalc3(). If you want to perform parallel model evaluation from multiple threads, use rbftscalcbuf() with per-thread buffer object. This function returns 0.0 when model is not initialized. INPUT PARAMETERS: S - RBF model X - coordinates, array[NX]. X may have more than NX elements, in this case only leading NX will be used. OUTPUT PARAMETERS: Y - function value, array[NY]. Y is out-parameter and reallocated after call to this function. In case you want to reuse previously allocated Y, you may use RBFCalcBuf(), which reallocates Y only when it is too small. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfcalc(rbfmodel s, double[] x, out double[] y) { y = new double[0]; rbf.rbfcalc(s.innerobj, x, ref y, null); } public static void rbfcalc(rbfmodel s, double[] x, out double[] y, alglib.xparams _params) { y = new double[0]; rbf.rbfcalc(s.innerobj, x, ref y, _params); } /************************************************************************* This function calculates values of the RBF model at the given point. 
Same as rbfcalc(), but does not reallocate Y when in is large enough to store function values. If you want to perform parallel model evaluation from multiple threads, use rbftscalcbuf() with per-thread buffer object. INPUT PARAMETERS: S - RBF model X - coordinates, array[NX]. X may have more than NX elements, in this case only leading NX will be used. Y - possibly preallocated array OUTPUT PARAMETERS: Y - function value, array[NY]. Y is not reallocated when it is larger than NY. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfcalcbuf(rbfmodel s, double[] x, ref double[] y) { rbf.rbfcalcbuf(s.innerobj, x, ref y, null); } public static void rbfcalcbuf(rbfmodel s, double[] x, ref double[] y, alglib.xparams _params) { rbf.rbfcalcbuf(s.innerobj, x, ref y, _params); } /************************************************************************* This function calculates values of the RBF model at the given point, using external buffer object (internal temporaries of RBF model are not modified). This function allows to use same RBF model object in different threads, assuming that different threads use different instances of buffer structure. INPUT PARAMETERS: S - RBF model, may be shared between different threads Buf - buffer object created for this particular instance of RBF model with rbfcreatecalcbuffer(). X - coordinates, array[NX]. X may have more than NX elements, in this case only leading NX will be used. Y - possibly preallocated array OUTPUT PARAMETERS: Y - function value, array[NY]. Y is not reallocated when it is larger than NY. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbftscalcbuf(rbfmodel s, rbfcalcbuffer buf, double[] x, ref double[] y) { rbf.rbftscalcbuf(s.innerobj, buf.innerobj, x, ref y, null); } public static void rbftscalcbuf(rbfmodel s, rbfcalcbuffer buf, double[] x, ref double[] y, alglib.xparams _params) { rbf.rbftscalcbuf(s.innerobj, buf.innerobj, x, ref y, _params); } /************************************************************************* This is legacy function for gridded calculation of RBF model. It is superseded by rbfgridcalc2v() and rbfgridcalc2vsubset() functions. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfgridcalc2(rbfmodel s, double[] x0, int n0, double[] x1, int n1, out double[,] y) { y = new double[0,0]; rbf.rbfgridcalc2(s.innerobj, x0, n0, x1, n1, ref y, null); } public static void rbfgridcalc2(rbfmodel s, double[] x0, int n0, double[] x1, int n1, out double[,] y, alglib.xparams _params) { y = new double[0,0]; rbf.rbfgridcalc2(s.innerobj, x0, n0, x1, n1, ref y, _params); } /************************************************************************* This function calculates values of the RBF model at the regular grid, which has N0*N1 points, with Point[I,J] = (X0[I], X1[J]). Vector-valued RBF models are supported. This function returns 0.0 when: * model is not initialized * NX<>2 ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! ! We recommend you to read 'Working with commercial version' section of ! 
ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. NOTE: Parallel processing is implemented only for modern (hierarchical) RBFs. Legacy version 1 RBFs (created by QNN or RBF-ML) are still processed serially. INPUT PARAMETERS: S - RBF model, used in read-only mode, can be shared between multiple invocations of this function from multiple threads. X0 - array of grid nodes, first coordinates, array[N0]. Must be ordered by ascending. Exception is generated if the array is not correctly ordered. N0 - grid size (number of nodes) in the first dimension X1 - array of grid nodes, second coordinates, array[N1] Must be ordered by ascending. Exception is generated if the array is not correctly ordered. N1 - grid size (number of nodes) in the second dimension OUTPUT PARAMETERS: Y - function values, array[NY*N0*N1], where NY is a number of "output" vector values (this function supports vector- valued RBF models). Y is out-variable and is reallocated by this function. Y[K+NY*(I0+I1*N0)]=F_k(X0[I0],X1[I1]), for: * K=0...NY-1 * I0=0...N0-1 * I1=0...N1-1 NOTE: this function supports weakly ordered grid nodes, i.e. you may have X[i]=X[i+1] for some i. It does not provide you any performance benefits due to duplication of points, just convenience and flexibility. NOTE: this function is re-entrant, i.e. you may use same rbfmodel structure in multiple threads calling this function for different grids. NOTE: if you need function values on some subset of regular grid, which may be described as "several compact and dense islands", you may use rbfgridcalc2vsubset(). -- ALGLIB -- Copyright 27.01.2017 by Bochkanov Sergey *************************************************************************/ public static void rbfgridcalc2v(rbfmodel s, double[] x0, int n0, double[] x1, int n1, out double[] y) { y = new double[0]; rbf.rbfgridcalc2v(s.innerobj, x0, n0, x1, n1, ref y, null); } public static void rbfgridcalc2v(rbfmodel s, double[] x0, int n0, double[] x1, int n1, out double[] y, alglib.xparams _params) { y = new double[0]; rbf.rbfgridcalc2v(s.innerobj, x0, n0, x1, n1, ref y, _params); } /************************************************************************* This function calculates values of the RBF model at some subset of regular grid: * grid has N0*N1 points, with Point[I,J] = (X0[I], X1[J]) * only values at some subset of this grid are required Vector-valued RBF models are supported. This function returns 0.0 when: * model is not initialized * NX<>2 ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. NOTE: Parallel processing is implemented only for modern (hierarchical) RBFs. Legacy version 1 RBFs (created by QNN or RBF-ML) are still processed serially. INPUT PARAMETERS: S - RBF model, used in read-only mode, can be shared between multiple invocations of this function from multiple threads. X0 - array of grid nodes, first coordinates, array[N0]. Must be ordered by ascending. Exception is generated if the array is not correctly ordered. 
N0 - grid size (number of nodes) in the first dimension X1 - array of grid nodes, second coordinates, array[N1] Must be ordered by ascending. Exception is generated if the array is not correctly ordered. N1 - grid size (number of nodes) in the second dimension FlagY - array[N0*N1]: * Y[I0+I1*N0] corresponds to node (X0[I0],X1[I1]) * it is a "bitmap" array which contains False for nodes which are NOT calculated, and True for nodes which are required. OUTPUT PARAMETERS: Y - function values, array[NY*N0*N1*N2], where NY is a number of "output" vector values (this function supports vector- valued RBF models): * Y[K+NY*(I0+I1*N0)]=F_k(X0[I0],X1[I1]), for K=0...NY-1, I0=0...N0-1, I1=0...N1-1. * elements of Y[] which correspond to FlagY[]=True are loaded by model values (which may be exactly zero for some nodes). * elements of Y[] which correspond to FlagY[]=False MAY be initialized by zeros OR may be calculated. This function processes grid as a hierarchy of nested blocks and micro-rows. If just one element of micro-row is required, entire micro-row (up to 8 nodes in the current version, but no promises) is calculated. NOTE: this function supports weakly ordered grid nodes, i.e. you may have X[i]=X[i+1] for some i. It does not provide you any performance benefits due to duplication of points, just convenience and flexibility. NOTE: this function is re-entrant, i.e. you may use same rbfmodel structure in multiple threads calling this function for different grids. -- ALGLIB -- Copyright 04.03.2016 by Bochkanov Sergey *************************************************************************/ public static void rbfgridcalc2vsubset(rbfmodel s, double[] x0, int n0, double[] x1, int n1, bool[] flagy, out double[] y) { y = new double[0]; rbf.rbfgridcalc2vsubset(s.innerobj, x0, n0, x1, n1, flagy, ref y, null); } public static void rbfgridcalc2vsubset(rbfmodel s, double[] x0, int n0, double[] x1, int n1, bool[] flagy, out double[] y, alglib.xparams _params) { y = new double[0]; rbf.rbfgridcalc2vsubset(s.innerobj, x0, n0, x1, n1, flagy, ref y, _params); } /************************************************************************* This function calculates values of the RBF model at the regular grid, which has N0*N1*N2 points, with Point[I,J,K] = (X0[I], X1[J], X2[K]). Vector-valued RBF models are supported. This function returns 0.0 when: * model is not initialized * NX<>3 ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. NOTE: Parallel processing is implemented only for modern (hierarchical) RBFs. Legacy version 1 RBFs (created by QNN or RBF-ML) are still processed serially. INPUT PARAMETERS: S - RBF model, used in read-only mode, can be shared between multiple invocations of this function from multiple threads. X0 - array of grid nodes, first coordinates, array[N0]. Must be ordered by ascending. Exception is generated if the array is not correctly ordered. N0 - grid size (number of nodes) in the first dimension X1 - array of grid nodes, second coordinates, array[N1] Must be ordered by ascending. Exception is generated if the array is not correctly ordered. 
N1 - grid size (number of nodes) in the second dimension X2 - array of grid nodes, third coordinates, array[N2] Must be ordered by ascending. Exception is generated if the array is not correctly ordered. N2 - grid size (number of nodes) in the third dimension OUTPUT PARAMETERS: Y - function values, array[NY*N0*N1*N2], where NY is a number of "output" vector values (this function supports vector- valued RBF models). Y is out-variable and is reallocated by this function. Y[K+NY*(I0+I1*N0+I2*N0*N1)]=F_k(X0[I0],X1[I1],X2[I2]), for: * K=0...NY-1 * I0=0...N0-1 * I1=0...N1-1 * I2=0...N2-1 NOTE: this function supports weakly ordered grid nodes, i.e. you may have X[i]=X[i+1] for some i. It does not provide you any performance benefits due to duplication of points, just convenience and flexibility. NOTE: this function is re-entrant, i.e. you may use same rbfmodel structure in multiple threads calling this function for different grids. NOTE: if you need function values on some subset of regular grid, which may be described as "several compact and dense islands", you may use rbfgridcalc3vsubset(). -- ALGLIB -- Copyright 04.03.2016 by Bochkanov Sergey *************************************************************************/ public static void rbfgridcalc3v(rbfmodel s, double[] x0, int n0, double[] x1, int n1, double[] x2, int n2, out double[] y) { y = new double[0]; rbf.rbfgridcalc3v(s.innerobj, x0, n0, x1, n1, x2, n2, ref y, null); } public static void rbfgridcalc3v(rbfmodel s, double[] x0, int n0, double[] x1, int n1, double[] x2, int n2, out double[] y, alglib.xparams _params) { y = new double[0]; rbf.rbfgridcalc3v(s.innerobj, x0, n0, x1, n1, x2, n2, ref y, _params); } /************************************************************************* This function calculates values of the RBF model at some subset of regular grid: * grid has N0*N1*N2 points, with Point[I,J,K] = (X0[I], X1[J], X2[K]) * only values at some subset of this grid are required Vector-valued RBF models are supported. This function returns 0.0 when: * model is not initialized * NX<>3 ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. NOTE: Parallel processing is implemented only for modern (hierarchical) RBFs. Legacy version 1 RBFs (created by QNN or RBF-ML) are still processed serially. INPUT PARAMETERS: S - RBF model, used in read-only mode, can be shared between multiple invocations of this function from multiple threads. X0 - array of grid nodes, first coordinates, array[N0]. Must be ordered by ascending. Exception is generated if the array is not correctly ordered. N0 - grid size (number of nodes) in the first dimension X1 - array of grid nodes, second coordinates, array[N1] Must be ordered by ascending. Exception is generated if the array is not correctly ordered. N1 - grid size (number of nodes) in the second dimension X2 - array of grid nodes, third coordinates, array[N2] Must be ordered by ascending. Exception is generated if the array is not correctly ordered. 
N2 - grid size (number of nodes) in the third dimension FlagY - array[N0*N1*N2]: * Y[I0+I1*N0+I2*N0*N1] corresponds to node (X0[I0],X1[I1],X2[I2]) * it is a "bitmap" array which contains False for nodes which are NOT calculated, and True for nodes which are required. OUTPUT PARAMETERS: Y - function values, array[NY*N0*N1*N2], where NY is a number of "output" vector values (this function supports vector- valued RBF models): * Y[K+NY*(I0+I1*N0+I2*N0*N1)]=F_k(X0[I0],X1[I1],X2[I2]), for K=0...NY-1, I0=0...N0-1, I1=0...N1-1, I2=0...N2-1. * elements of Y[] which correspond to FlagY[]=True are loaded by model values (which may be exactly zero for some nodes). * elements of Y[] which correspond to FlagY[]=False MAY be initialized by zeros OR may be calculated. This function processes grid as a hierarchy of nested blocks and micro-rows. If just one element of micro-row is required, entire micro-row (up to 8 nodes in the current version, but no promises) is calculated. NOTE: this function supports weakly ordered grid nodes, i.e. you may have X[i]=X[i+1] for some i. It does not provide you any performance benefits due to duplication of points, just convenience and flexibility. NOTE: this function is re-entrant, i.e. you may use same rbfmodel structure in multiple threads calling this function for different grids. -- ALGLIB -- Copyright 04.03.2016 by Bochkanov Sergey *************************************************************************/ public static void rbfgridcalc3vsubset(rbfmodel s, double[] x0, int n0, double[] x1, int n1, double[] x2, int n2, bool[] flagy, out double[] y) { y = new double[0]; rbf.rbfgridcalc3vsubset(s.innerobj, x0, n0, x1, n1, x2, n2, flagy, ref y, null); } public static void rbfgridcalc3vsubset(rbfmodel s, double[] x0, int n0, double[] x1, int n1, double[] x2, int n2, bool[] flagy, out double[] y, alglib.xparams _params) { y = new double[0]; rbf.rbfgridcalc3vsubset(s.innerobj, x0, n0, x1, n1, x2, n2, flagy, ref y, _params); } /************************************************************************* This function "unpacks" RBF model by extracting its coefficients. INPUT PARAMETERS: S - RBF model OUTPUT PARAMETERS: NX - dimensionality of argument NY - dimensionality of the target function XWR - model information, array[NC,NX+NY+1]. One row of the array corresponds to one basis function: * first NX columns - coordinates of the center * next NY columns - weights, one per dimension of the function being modelled For ModelVersion=1: * last column - radius, same for all dimensions of the function being modelled For ModelVersion=2: * last NX columns - radii, one per dimension NC - number of the centers V - polynomial term , array[NY,NX+1]. One row per one dimension of the function being modelled. First NX elements are linear coefficients, V[NX] is equal to the constant part. ModelVersion-version of the RBF model: * 1 - for models created by QNN and RBF-ML algorithms, compatible with ALGLIB 3.10 or earlier. 
* 2 - for models created by HierarchicalRBF, requires ALGLIB 3.11 or later -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfunpack(rbfmodel s, out int nx, out int ny, out double[,] xwr, out int nc, out double[,] v, out int modelversion) { nx = 0; ny = 0; xwr = new double[0,0]; nc = 0; v = new double[0,0]; modelversion = 0; rbf.rbfunpack(s.innerobj, ref nx, ref ny, ref xwr, ref nc, ref v, ref modelversion, null); } public static void rbfunpack(rbfmodel s, out int nx, out int ny, out double[,] xwr, out int nc, out double[,] v, out int modelversion, alglib.xparams _params) { nx = 0; ny = 0; xwr = new double[0,0]; nc = 0; v = new double[0,0]; modelversion = 0; rbf.rbfunpack(s.innerobj, ref nx, ref ny, ref xwr, ref nc, ref v, ref modelversion, _params); } /************************************************************************* This function returns model version. INPUT PARAMETERS: S - RBF model RESULT: * 1 - for models created by QNN and RBF-ML algorithms, compatible with ALGLIB 3.10 or earlier. * 2 - for models created by HierarchicalRBF, requires ALGLIB 3.11 or later -- ALGLIB -- Copyright 06.07.2016 by Bochkanov Sergey *************************************************************************/ public static int rbfgetmodelversion(rbfmodel s) { return rbf.rbfgetmodelversion(s.innerobj, null); } public static int rbfgetmodelversion(rbfmodel s, alglib.xparams _params) { return rbf.rbfgetmodelversion(s.innerobj, _params); } /************************************************************************* This function is used to peek into hierarchical RBF construction process from some other thread and get current progress indicator. It returns value in [0,1]. IMPORTANT: only HRBFs (hierarchical RBFs) support peeking into progress indicator. Legacy RBF-ML and RBF-QNN do not support it. You will always get 0 value. INPUT PARAMETERS: S - RBF model object RESULT: progress value, in [0,1] -- ALGLIB -- Copyright 17.11.2018 by Bochkanov Sergey *************************************************************************/ public static double rbfpeekprogress(rbfmodel s) { return rbf.rbfpeekprogress(s.innerobj, null); } public static double rbfpeekprogress(rbfmodel s, alglib.xparams _params) { return rbf.rbfpeekprogress(s.innerobj, _params); } /************************************************************************* This function is used to submit a request for termination of the hierarchical RBF construction process from some other thread. As result, RBF construction is terminated smoothly (with proper deallocation of all necessary resources) and resultant model is filled by zeros. A rep.terminationtype=8 will be returned upon receiving such request. IMPORTANT: only HRBFs (hierarchical RBFs) support termination requests. Legacy RBF-ML and RBF-QNN do not support it. An attempt to terminate their construction will be ignored. IMPORTANT: termination request flag is cleared when the model construction starts. Thus, any pre-construction termination requests will be silently ignored - only ones submitted AFTER construction has actually began will be handled. 
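EXAMPLE (informal sketch, for illustration only): monitoring and, if needed, cancelling
hierarchical RBF construction from the calling thread. The model is assumed to be prepared
exactly as in the sketch given for rbfcalc2() above; Task and Wait() come from the standard
System.Threading.Tasks namespace.

    alglib.rbfreport rep = null;
    var build = System.Threading.Tasks.Task.Run(() => alglib.rbfbuildmodel(model, out rep));
    while( !build.Wait(250) )                                    // poll four times per second
        Console.WriteLine("HRBF progress: {0:F1}%", 100*alglib.rbfpeekprogress(model));
    // to abort instead of waiting: alglib.rbfrequesttermination(model);
    // an aborted build returns rep.terminationtype=8 and a model filled by zeros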
INPUT PARAMETERS: S - RBF model object -- ALGLIB -- Copyright 17.11.2018 by Bochkanov Sergey *************************************************************************/ public static void rbfrequesttermination(rbfmodel s) { rbf.rbfrequesttermination(s.innerobj, null); } public static void rbfrequesttermination(rbfmodel s, alglib.xparams _params) { rbf.rbfrequesttermination(s.innerobj, _params); } } public partial class alglib { /************************************************************************* This function is left for backward compatibility. Use fitspheremc() instead. -- ALGLIB -- Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ public static void nsfitspheremcc(double[,] xy, int npoints, int nx, out double[] cx, out double rhi) { cx = new double[0]; rhi = 0; intcomp.nsfitspheremcc(xy, npoints, nx, ref cx, ref rhi, null); } public static void nsfitspheremcc(double[,] xy, int npoints, int nx, out double[] cx, out double rhi, alglib.xparams _params) { cx = new double[0]; rhi = 0; intcomp.nsfitspheremcc(xy, npoints, nx, ref cx, ref rhi, _params); } /************************************************************************* This function is left for backward compatibility. Use fitspheremi() instead. -- ALGLIB -- Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ public static void nsfitspheremic(double[,] xy, int npoints, int nx, out double[] cx, out double rlo) { cx = new double[0]; rlo = 0; intcomp.nsfitspheremic(xy, npoints, nx, ref cx, ref rlo, null); } public static void nsfitspheremic(double[,] xy, int npoints, int nx, out double[] cx, out double rlo, alglib.xparams _params) { cx = new double[0]; rlo = 0; intcomp.nsfitspheremic(xy, npoints, nx, ref cx, ref rlo, _params); } /************************************************************************* This function is left for backward compatibility. Use fitspheremz() instead. -- ALGLIB -- Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ public static void nsfitspheremzc(double[,] xy, int npoints, int nx, out double[] cx, out double rlo, out double rhi) { cx = new double[0]; rlo = 0; rhi = 0; intcomp.nsfitspheremzc(xy, npoints, nx, ref cx, ref rlo, ref rhi, null); } public static void nsfitspheremzc(double[,] xy, int npoints, int nx, out double[] cx, out double rlo, out double rhi, alglib.xparams _params) { cx = new double[0]; rlo = 0; rhi = 0; intcomp.nsfitspheremzc(xy, npoints, nx, ref cx, ref rlo, ref rhi, _params); } /************************************************************************* This function is left for backward compatibility. Use fitspherex() instead. 
-- ALGLIB -- Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ public static void nsfitspherex(double[,] xy, int npoints, int nx, int problemtype, double epsx, int aulits, double penalty, out double[] cx, out double rlo, out double rhi) { cx = new double[0]; rlo = 0; rhi = 0; intcomp.nsfitspherex(xy, npoints, nx, problemtype, epsx, aulits, penalty, ref cx, ref rlo, ref rhi, null); } public static void nsfitspherex(double[,] xy, int npoints, int nx, int problemtype, double epsx, int aulits, double penalty, out double[] cx, out double rlo, out double rhi, alglib.xparams _params) { cx = new double[0]; rlo = 0; rhi = 0; intcomp.nsfitspherex(xy, npoints, nx, problemtype, epsx, aulits, penalty, ref cx, ref rlo, ref rhi, _params); } } public partial class alglib { public class idw { /************************************************************************* Buffer object which is used to perform evaluation requests in the multithreaded mode (multiple threads working with same IDW object). This object should be created with idwcreatecalcbuffer(). *************************************************************************/ public class idwcalcbuffer : apobject { public double[] x; public double[] y; public double[] tsyw; public double[] tsw; public double[,] tsxy; public double[] tsdist; public nearestneighbor.kdtreerequestbuffer requestbuffer; public idwcalcbuffer() { init(); } public override void init() { x = new double[0]; y = new double[0]; tsyw = new double[0]; tsw = new double[0]; tsxy = new double[0,0]; tsdist = new double[0]; requestbuffer = new nearestneighbor.kdtreerequestbuffer(); } public override alglib.apobject make_copy() { idwcalcbuffer _result = new idwcalcbuffer(); _result.x = (double[])x.Clone(); _result.y = (double[])y.Clone(); _result.tsyw = (double[])tsyw.Clone(); _result.tsw = (double[])tsw.Clone(); _result.tsxy = (double[,])tsxy.Clone(); _result.tsdist = (double[])tsdist.Clone(); _result.requestbuffer = (nearestneighbor.kdtreerequestbuffer)requestbuffer.make_copy(); return _result; } }; /************************************************************************* IDW (Inverse Distance Weighting) model object. 
*************************************************************************/ public class idwmodel : apobject { public int nx; public int ny; public double[] globalprior; public int algotype; public int nlayers; public double r0; public double rdecay; public double lambda0; public double lambdalast; public double lambdadecay; public double shepardp; public nearestneighbor.kdtree tree; public int npoints; public double[] shepardxy; public idwcalcbuffer buffer; public idwmodel() { init(); } public override void init() { globalprior = new double[0]; tree = new nearestneighbor.kdtree(); shepardxy = new double[0]; buffer = new idwcalcbuffer(); } public override alglib.apobject make_copy() { idwmodel _result = new idwmodel(); _result.nx = nx; _result.ny = ny; _result.globalprior = (double[])globalprior.Clone(); _result.algotype = algotype; _result.nlayers = nlayers; _result.r0 = r0; _result.rdecay = rdecay; _result.lambda0 = lambda0; _result.lambdalast = lambdalast; _result.lambdadecay = lambdadecay; _result.shepardp = shepardp; _result.tree = (nearestneighbor.kdtree)tree.make_copy(); _result.npoints = npoints; _result.shepardxy = (double[])shepardxy.Clone(); _result.buffer = (idwcalcbuffer)buffer.make_copy(); return _result; } }; /************************************************************************* Builder object used to generate IDW (Inverse Distance Weighting) model. *************************************************************************/ public class idwbuilder : apobject { public int priortermtype; public double[] priortermval; public int algotype; public int nlayers; public double r0; public double rdecay; public double lambda0; public double lambdalast; public double lambdadecay; public double shepardp; public double[] xy; public int npoints; public int nx; public int ny; public double[,] tmpxy; public double[,] tmplayers; public int[] tmptags; public double[] tmpdist; public double[] tmpx; public double[] tmpwy; public double[] tmpw; public nearestneighbor.kdtree tmptree; public double[] tmpmean; public idwbuilder() { init(); } public override void init() { priortermval = new double[0]; xy = new double[0]; tmpxy = new double[0,0]; tmplayers = new double[0,0]; tmptags = new int[0]; tmpdist = new double[0]; tmpx = new double[0]; tmpwy = new double[0]; tmpw = new double[0]; tmptree = new nearestneighbor.kdtree(); tmpmean = new double[0]; } public override alglib.apobject make_copy() { idwbuilder _result = new idwbuilder(); _result.priortermtype = priortermtype; _result.priortermval = (double[])priortermval.Clone(); _result.algotype = algotype; _result.nlayers = nlayers; _result.r0 = r0; _result.rdecay = rdecay; _result.lambda0 = lambda0; _result.lambdalast = lambdalast; _result.lambdadecay = lambdadecay; _result.shepardp = shepardp; _result.xy = (double[])xy.Clone(); _result.npoints = npoints; _result.nx = nx; _result.ny = ny; _result.tmpxy = (double[,])tmpxy.Clone(); _result.tmplayers = (double[,])tmplayers.Clone(); _result.tmptags = (int[])tmptags.Clone(); _result.tmpdist = (double[])tmpdist.Clone(); _result.tmpx = (double[])tmpx.Clone(); _result.tmpwy = (double[])tmpwy.Clone(); _result.tmpw = (double[])tmpw.Clone(); _result.tmptree = (nearestneighbor.kdtree)tmptree.make_copy(); _result.tmpmean = (double[])tmpmean.Clone(); return _result; } }; /************************************************************************* IDW fitting report: rmserror RMS error avgerror average error maxerror maximum error r2 coefficient of determination, R-squared, 1-RSS/TSS 
*************************************************************************/ public class idwreport : apobject { public double rmserror; public double avgerror; public double maxerror; public double r2; public idwreport() { init(); } public override void init() { } public override alglib.apobject make_copy() { idwreport _result = new idwreport(); _result.rmserror = rmserror; _result.avgerror = avgerror; _result.maxerror = maxerror; _result.r2 = r2; return _result; } }; public const double w0 = 1.0; public const double meps = 1.0E-50; public const int defaultnlayers = 16; public const double defaultlambda0 = 0.3333; /************************************************************************* This function creates buffer structure which can be used to perform parallel IDW model evaluations (with one IDW model instance being used from multiple threads, as long as different threads use different instances of buffer). This buffer object can be used with idwtscalcbuf() function (here "ts" stands for "thread-safe", "buf" is a suffix which denotes function which reuses previously allocated output space). How to use it: * create IDW model structure or load it from file * call idwcreatecalcbuffer(), once per thread working with IDW model (you should call this function only AFTER model initialization, see below for more information) * call idwtscalcbuf() from different threads, with each thread working with its own copy of buffer object. INPUT PARAMETERS S - IDW model OUTPUT PARAMETERS Buf - external buffer. IMPORTANT: buffer object should be used only with IDW model object which was used to initialize buffer. Any attempt to use buffer with different object is dangerous - you may get memory violation error because sizes of internal arrays do not fit to dimensions of the IDW structure. IMPORTANT: you should call this function only for model which was built with model builder (or unserialized from file). Sizes of some internal structures are determined only after model is built, so buffer object created before model construction stage will be useless (and any attempt to use it will result in exception). -- ALGLIB -- Copyright 22.10.2018 by Sergey Bochkanov *************************************************************************/ public static void idwcreatecalcbuffer(idwmodel s, idwcalcbuffer buf, alglib.xparams _params) { alglib.ap.assert(s.nx>=1, "IDWCreateCalcBuffer: integrity check failed"); alglib.ap.assert(s.ny>=1, "IDWCreateCalcBuffer: integrity check failed"); alglib.ap.assert(s.nlayers>=0, "IDWCreateCalcBuffer: integrity check failed"); alglib.ap.assert(s.algotype>=0, "IDWCreateCalcBuffer: integrity check failed"); if( s.nlayers>=1 && s.algotype!=0 ) { nearestneighbor.kdtreecreaterequestbuffer(s.tree, buf.requestbuffer, _params); } apserv.rvectorsetlengthatleast(ref buf.x, s.nx, _params); apserv.rvectorsetlengthatleast(ref buf.y, s.ny, _params); apserv.rvectorsetlengthatleast(ref buf.tsyw, s.ny*Math.Max(s.nlayers, 1), _params); apserv.rvectorsetlengthatleast(ref buf.tsw, Math.Max(s.nlayers, 1), _params); } /************************************************************************* This subroutine creates builder object used to generate IDW model from irregularly sampled (scattered) dataset. Multidimensional scalar/vector- -valued are supported. 
Builder object is used to fit model to data as follows: * builder object is created with idwbuildercreate() function * dataset is added with idwbuildersetpoints() function * one of the modern IDW algorithms is chosen with either: * idwbuildersetalgomstab() - Multilayer STABilized algorithm (interpolation) Alternatively, one of the textbook algorithms can be chosen (not recommended): * idwbuildersetalgotextbookshepard() - textbook Shepard algorithm * idwbuildersetalgotextbookmodshepard()-textbook modified Shepard algorithm * finally, model construction is performed with idwfit() function. ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: NX - dimensionality of the argument, NX>=1 NY - dimensionality of the function being modeled, NY>=1; NY=1 corresponds to classic scalar function, NY>=1 corresponds to vector-valued function. OUTPUT PARAMETERS: State- builder object -- ALGLIB PROJECT -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwbuildercreate(int nx, int ny, idwbuilder state, alglib.xparams _params) { alglib.ap.assert(nx>=1, "IDWBuilderCreate: NX<=0"); alglib.ap.assert(ny>=1, "IDWBuilderCreate: NY<=0"); // // We choose reasonable defaults for the algorithm: // * MSTAB algorithm // * 12 layers // * default radius // * default Lambda0 // state.algotype = 2; state.priortermtype = 2; apserv.rvectorsetlengthatleast(ref state.priortermval, ny, _params); state.nlayers = defaultnlayers; state.r0 = 0; state.rdecay = 0.5; state.lambda0 = defaultlambda0; state.lambdalast = 0; state.lambdadecay = 1.0; // // Other parameters, not used but initialized // state.shepardp = 0; // // Initial dataset is empty // state.npoints = 0; state.nx = nx; state.ny = ny; } /************************************************************************* This function changes number of layers used by IDW-MSTAB algorithm. The more layers you have, the finer details can be reproduced with IDW model. The less layers you have, the less memory and CPU time is consumed by the model. Memory consumption grows linearly with layers count, running time grows sub-linearly. The default number of layers is 16, which allows you to reproduce details at distance down to SRad/65536. You will rarely need to change it. INPUT PARAMETERS: State - builder object NLayers - NLayers>=1, the number of layers used by the model. -- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwbuildersetnlayers(idwbuilder state, int nlayers, alglib.xparams _params) { alglib.ap.assert(nlayers>=1, "IDWBuilderSetNLayers: N<1"); state.nlayers = nlayers; } /************************************************************************* This function adds dataset to the builder object. This function overrides results of the previous calls, i.e. multiple calls of this function will result in only the last set being added. INPUT PARAMETERS: State - builder object XY - points, array[N,NX+NY]. One row corresponds to one point in the dataset. 
First NX elements are coordinates, next NY elements are function values. Array may be larger than specified, in this case only leading [N,NX+NY] elements will be used. N - number of points in the dataset, N>=0. -- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwbuildersetpoints(idwbuilder state, double[,] xy, int n, alglib.xparams _params) { int i = 0; int j = 0; int ew = 0; alglib.ap.assert(n>=0, "IDWBuilderSetPoints: N<0"); alglib.ap.assert(alglib.ap.rows(xy)>=n, "IDWBuilderSetPoints: Rows(XY)=state.nx+state.ny, "IDWBuilderSetPoints: Cols(XY)0 is required. A model value is obtained by "smart" averaging of the dataset points within search radius. NOTE 1: IDW interpolation can correctly handle ANY dataset, including datasets with non-distinct points. In case non-distinct points are found, an average value for this point will be calculated. NOTE 2: the memory requirements for model storage are O(NPoints*NLayers). The model construction needs twice as much memory as model storage. NOTE 3: by default 16 IDW layers are built which is enough for most cases. You can change this parameter with idwbuildersetnlayers() method. Larger values may be necessary if you need to reproduce extrafine details at distances smaller than SRad/65536. Smaller value may be necessary if you have to save memory and computing time, and ready to sacrifice some model quality. ALGORITHM DESCRIPTION ALGLIB implementation of IDW is somewhat similar to the modified Shepard's method (one with search radius R) but overcomes several of its drawbacks, namely: 1) a tendency to show stepwise behavior for uniform datasets 2) a tendency to show terrible interpolation properties for highly nonuniform datasets which often arise in geospatial tasks (function values are densely sampled across multiple separated "tracks") IDW-MSTAB method performs several passes over dataset and builds a sequence of progressively refined IDW models (layers), which starts from one with largest search radius SRad and continues to smaller search radii until required number of layers is built. Highest layers reproduce global behavior of the target function at larger distances whilst lower layers reproduce fine details at smaller distances. Each layer is an IDW model built with following modifications: * weights go to zero when distance approach to the current search radius * an additional regularizing term is added to the distance: w=1/(d^2+lambda) * an additional fictional term with unit weight and zero function value is added in order to promote continuity properties at the isolated and boundary points By default, 16 layers is built, which is enough for most cases. You can change this parameter with idwbuildersetnlayers() method. 
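EXAMPLE (informal sketch, for illustration only): a complete IDW-MSTAB workflow using the
builder functions of this unit; the public wrapper signatures, the tiny dataset and the
search radius are illustrative assumptions.

    double[,] xy = new double[,]{ {0,0,5}, {1,0,6}, {0,1,7}, {1,1,8} };  // columns: X0, X1, F
    alglib.idwbuilder builder;
    alglib.idwbuildercreate(2, 1, out builder);       // NX=2 arguments, NY=1 target
    alglib.idwbuildersetpoints(builder, xy);
    alglib.idwbuildersetalgomstab(builder, 1.0);      // SRad=1.0, should cover typical point spacing
    alglib.idwmodel model;
    alglib.idwreport rep;
    alglib.idwfit(builder, out model, out rep);       // rep.rmserror and rep.r2 describe the fit
    double v = alglib.idwcalc2(model, 0.5, 0.5);      // interpolant value at (0.5,0.5)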
-- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwbuildersetalgomstab(idwbuilder state, double srad, alglib.xparams _params) { alglib.ap.assert(math.isfinite(srad), "IDWBuilderSetAlgoMSTAB: SRad is not finite"); alglib.ap.assert((double)(srad)>(double)(0), "IDWBuilderSetAlgoMSTAB: SRad<=0"); // // Set algorithm // state.algotype = 2; // // Set options // state.r0 = srad; state.rdecay = 0.5; state.lambda0 = defaultlambda0; state.lambdalast = 0; state.lambdadecay = 1.0; } /************************************************************************* This function sets IDW model construction algorithm to the textbook Shepard's algorithm with custom (user-specified) power parameter. IMPORTANT: we do NOT recommend using textbook IDW algorithms because they have terrible interpolation properties. Use MSTAB in all cases. INPUT PARAMETERS: State - builder object P - power parameter, P>0; good value to start with is 2.0 NOTE 1: IDW interpolation can correctly handle ANY dataset, including datasets with non-distinct points. In case non-distinct points are found, an average value for this point will be calculated. -- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwbuildersetalgotextbookshepard(idwbuilder state, double p, alglib.xparams _params) { alglib.ap.assert(math.isfinite(p), "IDWBuilderSetAlgoShepard: P is not finite"); alglib.ap.assert((double)(p)>(double)(0), "IDWBuilderSetAlgoShepard: P<=0"); // // Set algorithm and options // state.algotype = 0; state.shepardp = p; } /************************************************************************* This function sets IDW model construction algorithm to the 'textbook' modified Shepard's algorithm with user-specified search radius. IMPORTANT: we do NOT recommend using textbook IDW algorithms because they have terrible interpolation properties. Use MSTAB in all cases. INPUT PARAMETERS: State - builder object R - search radius NOTE 1: IDW interpolation can correctly handle ANY dataset, including datasets with non-distinct points. In case non-distinct points are found, an average value for this point will be calculated. -- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwbuildersetalgotextbookmodshepard(idwbuilder state, double r, alglib.xparams _params) { alglib.ap.assert(math.isfinite(r), "IDWBuilderSetAlgoModShepard: R is not finite"); alglib.ap.assert((double)(r)>(double)(0), "IDWBuilderSetAlgoModShepard: R<=0"); // // Set algorithm and options // state.algotype = 1; state.r0 = r; } /************************************************************************* This function sets prior term (model value at infinity) as user-specified value. 
INPUT PARAMETERS: S - spline builder V - value for user-defined prior NOTE: for vector-valued models all components of the prior are set to same user-specified value -- ALGLIB -- Copyright 29.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwbuildersetuserterm(idwbuilder state, double v, alglib.xparams _params) { int j = 0; alglib.ap.assert(math.isfinite(v), "IDWBuilderSetUserTerm: infinite/NAN value passed"); state.priortermtype = 0; for(j=0; j<=state.ny-1; j++) { state.priortermval[j] = v; } } /************************************************************************* This function sets constant prior term (model value at infinity). Constant prior term is determined as mean value over dataset. INPUT PARAMETERS: S - spline builder -- ALGLIB -- Copyright 29.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwbuildersetconstterm(idwbuilder state, alglib.xparams _params) { state.priortermtype = 2; } /************************************************************************* This function sets zero prior term (model value at infinity). INPUT PARAMETERS: S - spline builder -- ALGLIB -- Copyright 29.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwbuildersetzeroterm(idwbuilder state, alglib.xparams _params) { state.priortermtype = 3; } /************************************************************************* IDW interpolation: scalar target, 1-dimensional argument NOTE: this function modifies internal temporaries of the IDW model, thus IT IS NOT THREAD-SAFE! If you want to perform parallel model evaluation from the multiple threads, use idwtscalcbuf() with per- thread buffer object. INPUT PARAMETERS: S - IDW interpolant built with IDW builder X0 - argument value Result: IDW interpolant S(X0) -- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static double idwcalc1(idwmodel s, double x0, alglib.xparams _params) { double result = 0; alglib.ap.assert(s.nx==1, "IDWCalc1: S.NX<>1"); alglib.ap.assert(s.ny==1, "IDWCalc1: S.NY<>1"); alglib.ap.assert(math.isfinite(x0), "IDWCalc1: X0 is INF or NAN"); s.buffer.x[0] = x0; idwtscalcbuf(s, s.buffer, s.buffer.x, ref s.buffer.y, _params); result = s.buffer.y[0]; return result; } /************************************************************************* IDW interpolation: scalar target, 2-dimensional argument NOTE: this function modifies internal temporaries of the IDW model, thus IT IS NOT THREAD-SAFE! If you want to perform parallel model evaluation from the multiple threads, use idwtscalcbuf() with per- thread buffer object. 
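EXAMPLE (informal sketch, for illustration only): evaluating one shared IDW model from
several threads with per-thread buffers, as recommended above. idwcreatecalcbuffer() and
idwtscalcbuf() are the thread-safe entry points of this unit (their public wrapper
signatures are assumed here); "model", "queries" and "results" are hypothetical variables
holding a fitted model, the query points and the outputs.

    System.Threading.Tasks.Parallel.For(0, queries.Length,
        () => { alglib.idwcalcbuffer b; alglib.idwcreatecalcbuffer(model, out b); return b; },
        (i, loop, buf) =>
        {
            double[] y = new double[0];
            alglib.idwtscalcbuf(model, buf, queries[i], ref y);  // touches buf only, not model
            results[i] = y[0];                                   // NY=1 assumed in this sketch
            return buf;                                          // buffer reused by this worker
        },
        buf => { });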
INPUT PARAMETERS: S - IDW interpolant built with IDW builder X0, X1 - argument value Result: IDW interpolant S(X0,X1) -- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static double idwcalc2(idwmodel s, double x0, double x1, alglib.xparams _params) { double result = 0; alglib.ap.assert(s.nx==2, "IDWCalc2: S.NX<>2"); alglib.ap.assert(s.ny==1, "IDWCalc2: S.NY<>1"); alglib.ap.assert(math.isfinite(x0), "IDWCalc2: X0 is INF or NAN"); alglib.ap.assert(math.isfinite(x1), "IDWCalc2: X1 is INF or NAN"); s.buffer.x[0] = x0; s.buffer.x[1] = x1; idwtscalcbuf(s, s.buffer, s.buffer.x, ref s.buffer.y, _params); result = s.buffer.y[0]; return result; } /************************************************************************* IDW interpolation: scalar target, 3-dimensional argument NOTE: this function modifies internal temporaries of the IDW model, thus IT IS NOT THREAD-SAFE! If you want to perform parallel model evaluation from the multiple threads, use idwtscalcbuf() with per- thread buffer object. INPUT PARAMETERS: S - IDW interpolant built with IDW builder X0,X1,X2- argument value Result: IDW interpolant S(X0,X1,X2) -- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static double idwcalc3(idwmodel s, double x0, double x1, double x2, alglib.xparams _params) { double result = 0; alglib.ap.assert(s.nx==3, "IDWCalc3: S.NX<>3"); alglib.ap.assert(s.ny==1, "IDWCalc3: S.NY<>1"); alglib.ap.assert(math.isfinite(x0), "IDWCalc3: X0 is INF or NAN"); alglib.ap.assert(math.isfinite(x1), "IDWCalc3: X1 is INF or NAN"); alglib.ap.assert(math.isfinite(x2), "IDWCalc3: X2 is INF or NAN"); s.buffer.x[0] = x0; s.buffer.x[1] = x1; s.buffer.x[2] = x2; idwtscalcbuf(s, s.buffer, s.buffer.x, ref s.buffer.y, _params); result = s.buffer.y[0]; return result; } /************************************************************************* This function calculates values of the IDW model at the given point. This is general function which can be used for arbitrary NX (dimension of the space of arguments) and NY (dimension of the function itself). However when you have NY=1 you may find more convenient to use idwcalc1(), idwcalc2() or idwcalc3(). NOTE: this function modifies internal temporaries of the IDW model, thus IT IS NOT THREAD-SAFE! If you want to perform parallel model evaluation from the multiple threads, use idwtscalcbuf() with per- thread buffer object. INPUT PARAMETERS: S - IDW model X - coordinates, array[NX]. X may have more than NX elements, in this case only leading NX will be used. OUTPUT PARAMETERS: Y - function value, array[NY]. Y is out-parameter and will be reallocated after call to this function. In case you want to reuse previously allocated Y, you may use idwcalcbuf(), which reallocates Y only when it is too small. -- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwcalc(idwmodel s, double[] x, ref double[] y, alglib.xparams _params) { y = new double[0]; idwtscalcbuf(s, s.buffer, x, ref y, _params); } /************************************************************************* This function calculates values of the IDW model at the given point. Same as idwcalc(), but does not reallocate Y when in is large enough to store function values. NOTE: this function modifies internal temporaries of the IDW model, thus IT IS NOT THREAD-SAFE! 
If you want to perform parallel model evaluation from the multiple threads, use idwtscalcbuf() with per- thread buffer object. INPUT PARAMETERS: S - IDW model X - coordinates, array[NX]. X may have more than NX elements, in this case only leading NX will be used. Y - possibly preallocated array OUTPUT PARAMETERS: Y - function value, array[NY]. Y is not reallocated when it is larger than NY. -- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwcalcbuf(idwmodel s, double[] x, ref double[] y, alglib.xparams _params) { idwtscalcbuf(s, s.buffer, x, ref y, _params); } /************************************************************************* This function calculates values of the IDW model at the given point, using external buffer object (internal temporaries of IDW model are not modified). This function allows to use same IDW model object in different threads, assuming that different threads use different instances of the buffer structure. INPUT PARAMETERS: S - IDW model, may be shared between different threads Buf - buffer object created for this particular instance of IDW model with idwcreatecalcbuffer(). X - coordinates, array[NX]. X may have more than NX elements, in this case only leading NX will be used. Y - possibly preallocated array OUTPUT PARAMETERS: Y - function value, array[NY]. Y is not reallocated when it is larger than NY. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void idwtscalcbuf(idwmodel s, idwcalcbuffer buf, double[] x, ref double[] y, alglib.xparams _params) { int i = 0; int j = 0; int ew = 0; int k = 0; int layeridx = 0; int nx = 0; int ny = 0; int npoints = 0; double v = 0; double vv = 0; double f = 0; double p = 0; double r = 0; double eps = 0; double lambdacur = 0; double lambdadecay = 0; double invrdecay = 0; double invr = 0; bool fastcalcpossible = new bool(); double wf0 = 0; double ws0 = 0; double wf1 = 0; double ws1 = 0; nx = s.nx; ny = s.ny; alglib.ap.assert(alglib.ap.len(x)>=nx, "IDWTsCalcBuf: Length(X)0, "IDWTsCalcBuf: integrity check failed"); eps = 1.0E-50; ew = nx+ny; p = s.shepardp; for(j=0; j<=ny-1; j++) { y[j] = 0; buf.tsyw[j] = eps; } for(i=0; i<=npoints-1; i++) { // // Compute squared distance // v = 0; for(j=0; j<=nx-1; j++) { vv = s.shepardxy[i*ew+j]-x[j]; v = v+vv*vv; } // // Compute weight (with small regularizing addition) // v = Math.Pow(v, p*0.5); v = 1/(eps+v); // // Accumulate // for(j=0; j<=ny-1; j++) { y[j] = y[j]+v*s.shepardxy[i*ew+nx+j]; buf.tsyw[j] = buf.tsyw[j]+v; } } for(j=0; j<=ny-1; j++) { y[j] = y[j]/buf.tsyw[j]+s.globalprior[j]; } return; } // // Textbook modified Shepard's method // if( s.algotype==1 ) { eps = 1.0E-50; r = s.r0; for(j=0; j<=ny-1; j++) { y[j] = 0; buf.tsyw[j] = eps; } k = nearestneighbor.kdtreetsqueryrnn(s.tree, buf.requestbuffer, x, r, true, _params); nearestneighbor.kdtreetsqueryresultsxy(s.tree, buf.requestbuffer, ref buf.tsxy, _params); nearestneighbor.kdtreetsqueryresultsdistances(s.tree, buf.requestbuffer, ref buf.tsdist, _params); for(i=0; i<=k-1; i++) { v = buf.tsdist[i]; v = (r-v)/(r*v+eps); v = v*v; for(j=0; j<=ny-1; j++) { y[j] = y[j]+v*buf.tsxy[i,nx+j]; buf.tsyw[j] = buf.tsyw[j]+v; } } for(j=0; j<=ny-1; j++) { y[j] = y[j]/buf.tsyw[j]+s.globalprior[j]; } return; } // // MSTAB // if( s.algotype==2 ) { alglib.ap.assert((double)(w0)==(double)(1), "IDWTsCalcBuf: unexpected W0, integrity check failed"); invrdecay = 
1/s.rdecay; invr = 1/s.r0; lambdadecay = s.lambdadecay; fastcalcpossible = (ny==1 && s.nlayers>=3) && (double)(lambdadecay)==(double)(1); if( fastcalcpossible ) { // // Important special case, NY=1, no lambda-decay, // we can perform optimized fast evaluation // wf0 = 0; ws0 = w0; wf1 = 0; ws1 = w0; for(j=0; j<=s.nlayers-1; j++) { buf.tsyw[j] = 0; buf.tsw[j] = w0; } } else { // // Setup variables for generic evaluation path // for(j=0; j<=ny*s.nlayers-1; j++) { buf.tsyw[j] = 0; } for(j=0; j<=s.nlayers-1; j++) { buf.tsw[j] = w0; } } k = nearestneighbor.kdtreetsqueryrnnu(s.tree, buf.requestbuffer, x, s.r0, true, _params); nearestneighbor.kdtreetsqueryresultsxy(s.tree, buf.requestbuffer, ref buf.tsxy, _params); nearestneighbor.kdtreetsqueryresultsdistances(s.tree, buf.requestbuffer, ref buf.tsdist, _params); for(i=0; i<=k-1; i++) { lambdacur = s.lambda0; vv = buf.tsdist[i]*invr; if( fastcalcpossible ) { // // Important special case, fast evaluation possible // v = vv*vv; v = (1-v)*(1-v)/(v+lambdacur); f = buf.tsxy[i,nx+0]; wf0 = wf0+v*f; ws0 = ws0+v; vv = vv*invrdecay; if( vv>=1.0 ) { continue; } v = vv*vv; v = (1-v)*(1-v)/(v+lambdacur); f = buf.tsxy[i,nx+1]; wf1 = wf1+v*f; ws1 = ws1+v; vv = vv*invrdecay; if( vv>=1.0 ) { continue; } for(layeridx=2; layeridx<=s.nlayers-1; layeridx++) { if( layeridx==s.nlayers-1 ) { lambdacur = s.lambdalast; } v = vv*vv; v = (1-v)*(1-v)/(v+lambdacur); f = buf.tsxy[i,nx+layeridx]; buf.tsyw[layeridx] = buf.tsyw[layeridx]+v*f; buf.tsw[layeridx] = buf.tsw[layeridx]+v; vv = vv*invrdecay; if( vv>=1.0 ) { break; } } } else { // // General case // for(layeridx=0; layeridx<=s.nlayers-1; layeridx++) { if( layeridx==s.nlayers-1 ) { lambdacur = s.lambdalast; } if( vv>=1.0 ) { break; } v = vv*vv; v = (1-v)*(1-v)/(v+lambdacur); for(j=0; j<=ny-1; j++) { f = buf.tsxy[i,nx+layeridx*ny+j]; buf.tsyw[layeridx*ny+j] = buf.tsyw[layeridx*ny+j]+v*f; } buf.tsw[layeridx] = buf.tsw[layeridx]+v; lambdacur = lambdacur*lambdadecay; vv = vv*invrdecay; } } } if( fastcalcpossible ) { // // Important special case, finalize evaluations // buf.tsyw[0] = wf0; buf.tsw[0] = ws0; buf.tsyw[1] = wf1; buf.tsw[1] = ws1; } for(j=0; j<=ny-1; j++) { y[j] = s.globalprior[j]; } for(layeridx=0; layeridx<=s.nlayers-1; layeridx++) { for(j=0; j<=ny-1; j++) { y[j] = y[j]+buf.tsyw[layeridx*ny+j]/buf.tsw[layeridx]; } } return; } // // // alglib.ap.assert(false, "IDWTsCalcBuf: unexpected AlgoType"); } /************************************************************************* This function fits IDW model to the dataset using current IDW construction algorithm. A model being built and fitting report are returned. INPUT PARAMETERS: State - builder object OUTPUT PARAMETERS: Model - an IDW model built with current algorithm Rep - model fitting report, fields of this structure contain information about average fitting errors. NOTE: although IDW-MSTAB algorithm is an interpolation method, i.e. it tries to fit the model exactly, it can handle datasets with non- distinct points which can not be fit exactly; in such cases least- squares fitting is performed. 
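EXAMPLE (informal sketch, for illustration only): checking the fitting report returned
together with the model; "builder" is an idwbuilder prepared as in the MSTAB sketch above.

    alglib.idwmodel model;
    alglib.idwreport rep;
    alglib.idwfit(builder, out model, out rep);
    if( rep.rmserror>1.0E-9 )
        Console.WriteLine("nodes not reproduced exactly (e.g. non-distinct points), R2={0:F6}", rep.r2);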
-- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ public static void idwfit(idwbuilder state, idwmodel model, idwreport rep, alglib.xparams _params) { int i = 0; int i0 = 0; int j = 0; int k = 0; int layeridx = 0; int srcidx = 0; double v = 0; double vv = 0; int npoints = 0; int nx = 0; int ny = 0; double rcur = 0; double lambdacur = 0; double rss = 0; double tss = 0; nx = state.nx; ny = state.ny; npoints = state.npoints; // // Clear report fields // rep.rmserror = 0; rep.avgerror = 0; rep.maxerror = 0; rep.r2 = 1.0; // // Quick exit for empty dataset // if( state.npoints==0 ) { model.nx = nx; model.ny = ny; model.globalprior = new double[ny]; for(i=0; i<=ny-1; i++) { model.globalprior[i] = 0; } model.algotype = 0; model.nlayers = 0; model.r0 = 1; model.rdecay = 0.5; model.lambda0 = 0; model.lambdalast = 0; model.lambdadecay = 1; model.shepardp = 2; model.npoints = 0; idwcreatecalcbuffer(model, model.buffer, _params); return; } // // Compute temporaries which will be required later: // * global mean // alglib.ap.assert(state.npoints>0, "IDWFit: integrity check failed"); apserv.rvectorsetlengthatleast(ref state.tmpmean, ny, _params); for(j=0; j<=ny-1; j++) { state.tmpmean[j] = 0; } for(i=0; i<=npoints-1; i++) { for(j=0; j<=ny-1; j++) { state.tmpmean[j] = state.tmpmean[j]+state.xy[i*(nx+ny)+nx+j]; } } for(j=0; j<=ny-1; j++) { state.tmpmean[j] = state.tmpmean[j]/npoints; } // // Compute global prior // // NOTE: for original Shepard's method it is always mean value // apserv.rvectorsetlengthatleast(ref model.globalprior, ny, _params); for(j=0; j<=ny-1; j++) { model.globalprior[j] = state.tmpmean[j]; } if( state.algotype!=0 ) { // // Algorithm is set to one of the "advanced" versions with search // radius which can handle non-mean prior term // if( state.priortermtype==0 ) { // // User-specified prior // for(j=0; j<=ny-1; j++) { model.globalprior[j] = state.priortermval[j]; } } if( state.priortermtype==3 ) { // // Zero prior // for(j=0; j<=ny-1; j++) { model.globalprior[j] = 0; } } } // // Textbook Shepard // if( state.algotype==0 ) { // // Initialize model // model.algotype = 0; model.nx = nx; model.ny = ny; model.nlayers = 1; model.r0 = 1; model.rdecay = 0.5; model.lambda0 = 0; model.lambdalast = 0; model.lambdadecay = 1; model.shepardp = state.shepardp; // // Copy dataset // apserv.rvectorsetlengthatleast(ref model.shepardxy, npoints*(nx+ny), _params); for(i=0; i<=npoints-1; i++) { for(j=0; j<=nx-1; j++) { model.shepardxy[i*(nx+ny)+j] = state.xy[i*(nx+ny)+j]; } for(j=0; j<=ny-1; j++) { model.shepardxy[i*(nx+ny)+nx+j] = state.xy[i*(nx+ny)+nx+j]-model.globalprior[j]; } } model.npoints = npoints; // // Prepare internal buffer // Evaluate report fields // idwcreatecalcbuffer(model, model.buffer, _params); errormetricsviacalc(state, model, rep, _params); return; } // // Textbook modified Shepard's method // if( state.algotype==1 ) { // // Initialize model // model.algotype = 1; model.nx = nx; model.ny = ny; model.nlayers = 1; model.r0 = state.r0; model.rdecay = 1; model.lambda0 = 0; model.lambdalast = 0; model.lambdadecay = 1; model.shepardp = 0; // // Build kd-tree search structure // apserv.rmatrixsetlengthatleast(ref state.tmpxy, npoints, nx+ny, _params); for(i=0; i<=npoints-1; i++) { for(j=0; j<=nx-1; j++) { state.tmpxy[i,j] = state.xy[i*(nx+ny)+j]; } for(j=0; j<=ny-1; j++) { state.tmpxy[i,nx+j] = state.xy[i*(nx+ny)+nx+j]-model.globalprior[j]; } } nearestneighbor.kdtreebuild(state.tmpxy, npoints, nx, ny, 2, 
model.tree, _params); // // Prepare internal buffer // Evaluate report fields // idwcreatecalcbuffer(model, model.buffer, _params); errormetricsviacalc(state, model, rep, _params); return; } // // MSTAB algorithm // if( state.algotype==2 ) { alglib.ap.assert(state.nlayers>=1, "IDWFit: integrity check failed"); // // Initialize model // model.algotype = 2; model.nx = nx; model.ny = ny; model.nlayers = state.nlayers; model.r0 = state.r0; model.rdecay = 0.5; model.lambda0 = state.lambda0; model.lambdadecay = 1.0; model.lambdalast = meps; model.shepardp = 0; // // Build kd-tree search structure, // prepare input residuals for the first layer of the model // apserv.rmatrixsetlengthatleast(ref state.tmpxy, npoints, nx, _params); apserv.rmatrixsetlengthatleast(ref state.tmplayers, npoints, nx+ny*(state.nlayers+1), _params); apserv.ivectorsetlengthatleast(ref state.tmptags, npoints, _params); for(i=0; i<=npoints-1; i++) { for(j=0; j<=nx-1; j++) { v = state.xy[i*(nx+ny)+j]; state.tmpxy[i,j] = v; state.tmplayers[i,j] = v; } state.tmptags[i] = i; for(j=0; j<=ny-1; j++) { state.tmplayers[i,nx+j] = state.xy[i*(nx+ny)+nx+j]-model.globalprior[j]; } } nearestneighbor.kdtreebuildtagged(state.tmpxy, state.tmptags, npoints, nx, 0, 2, state.tmptree, _params); // // Iteratively build layer by layer // apserv.rvectorsetlengthatleast(ref state.tmpx, nx, _params); apserv.rvectorsetlengthatleast(ref state.tmpwy, ny, _params); apserv.rvectorsetlengthatleast(ref state.tmpw, ny, _params); for(layeridx=0; layeridx<=state.nlayers-1; layeridx++) { // // Determine layer metrics // rcur = model.r0*Math.Pow(model.rdecay, layeridx); lambdacur = model.lambda0*Math.Pow(model.lambdadecay, layeridx); if( layeridx==state.nlayers-1 ) { lambdacur = model.lambdalast; } // // For each point compute residual from fitting with current layer // for(i=0; i<=npoints-1; i++) { for(j=0; j<=nx-1; j++) { state.tmpx[j] = state.tmplayers[i,j]; } k = nearestneighbor.kdtreequeryrnn(state.tmptree, state.tmpx, rcur, true, _params); nearestneighbor.kdtreequeryresultstags(state.tmptree, ref state.tmptags, _params); nearestneighbor.kdtreequeryresultsdistances(state.tmptree, ref state.tmpdist, _params); for(j=0; j<=ny-1; j++) { state.tmpwy[j] = 0; state.tmpw[j] = w0; } for(i0=0; i0<=k-1; i0++) { vv = state.tmpdist[i0]/rcur; vv = vv*vv; v = (1-vv)*(1-vv)/(vv+lambdacur); srcidx = state.tmptags[i0]; for(j=0; j<=ny-1; j++) { state.tmpwy[j] = state.tmpwy[j]+v*state.tmplayers[srcidx,nx+layeridx*ny+j]; state.tmpw[j] = state.tmpw[j]+v; } } for(j=0; j<=ny-1; j++) { v = state.tmplayers[i,nx+layeridx*ny+j]; state.tmplayers[i,nx+(layeridx+1)*ny+j] = v-state.tmpwy[j]/state.tmpw[j]; } } } nearestneighbor.kdtreebuild(state.tmplayers, npoints, nx, ny*state.nlayers, 2, model.tree, _params); // // Evaluate report fields // rep.rmserror = 0; rep.avgerror = 0; rep.maxerror = 0; rss = 0; tss = 0; for(i=0; i<=npoints-1; i++) { for(j=0; j<=ny-1; j++) { v = Math.Abs(state.tmplayers[i,nx+state.nlayers*ny+j]); rep.rmserror = rep.rmserror+v*v; rep.avgerror = rep.avgerror+v; rep.maxerror = Math.Max(rep.maxerror, Math.Abs(v)); rss = rss+v*v; tss = tss+math.sqr(state.xy[i*(nx+ny)+nx+j]-state.tmpmean[j]); } } rep.rmserror = Math.Sqrt(rep.rmserror/(npoints*ny)); rep.avgerror = rep.avgerror/(npoints*ny); rep.r2 = 1.0-rss/apserv.coalesce(tss, 1.0, _params); // // Prepare internal buffer // idwcreatecalcbuffer(model, model.buffer, _params); return; } // // Unknown algorithm // alglib.ap.assert(false, "IDWFit: integrity check failed, unexpected algorithm"); } 
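/*************************************************************************
NOTE: the serializer routines below are normally driven through the
string-based wrappers idwserialize()/idwunserialize() from the public
interface part of this file. A minimal round-trip sketch, assuming the
usual (object, out string) / (string, out object) wrapper signatures:

    string s;
    alglib.idwserialize(model, out s);      // IDW model -> portable string
    alglib.idwmodel model2;
    alglib.idwunserialize(s, out model2);   // string -> independent model copy

The restored model evaluates identically to the original; its internal
calculation buffer is allocated during unserialization.
*************************************************************************/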
/************************************************************************* Serializer: allocation -- ALGLIB -- Copyright 28.02.2018 by Bochkanov Sergey *************************************************************************/ public static void idwalloc(alglib.serializer s, idwmodel model, alglib.xparams _params) { bool processed = new bool(); // // Header // s.alloc_entry(); // // Algorithm type and fields which are set for all algorithms // s.alloc_entry(); s.alloc_entry(); s.alloc_entry(); apserv.allocrealarray(s, model.globalprior, -1, _params); s.alloc_entry(); s.alloc_entry(); s.alloc_entry(); s.alloc_entry(); s.alloc_entry(); s.alloc_entry(); s.alloc_entry(); // // Algorithm-specific fields // processed = false; if( model.algotype==0 ) { s.alloc_entry(); apserv.allocrealarray(s, model.shepardxy, -1, _params); processed = true; } if( model.algotype>0 ) { nearestneighbor.kdtreealloc(s, model.tree, _params); processed = true; } alglib.ap.assert(processed, "IDW: integrity check failed during serialization"); } /************************************************************************* Serializer: serialization -- ALGLIB -- Copyright 28.02.2018 by Bochkanov Sergey *************************************************************************/ public static void idwserialize(alglib.serializer s, idwmodel model, alglib.xparams _params) { bool processed = new bool(); // // Header // s.serialize_int(scodes.getidwserializationcode(_params)); // // Algorithm type and fields which are set for all algorithms // s.serialize_int(model.algotype); s.serialize_int(model.nx); s.serialize_int(model.ny); apserv.serializerealarray(s, model.globalprior, -1, _params); s.serialize_int(model.nlayers); s.serialize_double(model.r0); s.serialize_double(model.rdecay); s.serialize_double(model.lambda0); s.serialize_double(model.lambdalast); s.serialize_double(model.lambdadecay); s.serialize_double(model.shepardp); // // Algorithm-specific fields // processed = false; if( model.algotype==0 ) { s.serialize_int(model.npoints); apserv.serializerealarray(s, model.shepardxy, -1, _params); processed = true; } if( model.algotype>0 ) { nearestneighbor.kdtreeserialize(s, model.tree, _params); processed = true; } alglib.ap.assert(processed, "IDW: integrity check failed during serialization"); } /************************************************************************* Serializer: unserialization -- ALGLIB -- Copyright 28.02.2018 by Bochkanov Sergey *************************************************************************/ public static void idwunserialize(alglib.serializer s, idwmodel model, alglib.xparams _params) { bool processed = new bool(); int scode = 0; // // Header // scode = s.unserialize_int(); alglib.ap.assert(scode==scodes.getidwserializationcode(_params), "IDWUnserialize: stream header corrupted"); // // Algorithm type and fields which are set for all algorithms // model.algotype = s.unserialize_int(); model.nx = s.unserialize_int(); model.ny = s.unserialize_int(); apserv.unserializerealarray(s, ref model.globalprior, _params); model.nlayers = s.unserialize_int(); model.r0 = s.unserialize_double(); model.rdecay = s.unserialize_double(); model.lambda0 = s.unserialize_double(); model.lambdalast = s.unserialize_double(); model.lambdadecay = s.unserialize_double(); model.shepardp = s.unserialize_double(); // // Algorithm-specific fields // processed = false; if( model.algotype==0 ) { model.npoints = s.unserialize_int(); apserv.unserializerealarray(s, ref model.shepardxy, _params); processed = true; } if( 
model.algotype>0 ) { nearestneighbor.kdtreeunserialize(s, model.tree, _params); processed = true; } alglib.ap.assert(processed, "IDW: integrity check failed during serialization"); // // Temporary buffers // idwcreatecalcbuffer(model, model.buffer, _params); } /************************************************************************* This function evaluates error metrics for the model using IDWTsCalcBuf() to calculate model at each point. NOTE: modern IDW algorithms (MSTAB, MSMOOTH) can generate residuals during model construction, so they do not need this function in order to evaluate error metrics. Following fields of Rep are filled: * rep.rmserror * rep.avgerror * rep.maxerror * rep.r2 -- ALGLIB -- Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ private static void errormetricsviacalc(idwbuilder state, idwmodel model, idwreport rep, alglib.xparams _params) { int npoints = 0; int nx = 0; int ny = 0; int i = 0; int j = 0; double v = 0; double vv = 0; double rss = 0; double tss = 0; npoints = state.npoints; nx = state.nx; ny = state.ny; if( npoints==0 ) { rep.rmserror = 0; rep.avgerror = 0; rep.maxerror = 0; rep.r2 = 1; return; } rep.rmserror = 0; rep.avgerror = 0; rep.maxerror = 0; rss = 0; tss = 0; for(i=0; i<=npoints-1; i++) { for(j=0; j<=nx-1; j++) { model.buffer.x[j] = state.xy[i*(nx+ny)+j]; } idwtscalcbuf(model, model.buffer, model.buffer.x, ref model.buffer.y, _params); for(j=0; j<=ny-1; j++) { vv = state.xy[i*(nx+ny)+nx+j]; v = Math.Abs(vv-model.buffer.y[j]); rep.rmserror = rep.rmserror+v*v; rep.avgerror = rep.avgerror+v; rep.maxerror = Math.Max(rep.maxerror, v); rss = rss+v*v; tss = tss+math.sqr(vv-state.tmpmean[j]); } } rep.rmserror = Math.Sqrt(rep.rmserror/(npoints*ny)); rep.avgerror = rep.avgerror/(npoints*ny); rep.r2 = 1.0-rss/apserv.coalesce(tss, 1.0, _params); } } public class ratint { /************************************************************************* Barycentric interpolant. *************************************************************************/ public class barycentricinterpolant : apobject { public int n; public double sy; public double[] x; public double[] y; public double[] w; public barycentricinterpolant() { init(); } public override void init() { x = new double[0]; y = new double[0]; w = new double[0]; } public override alglib.apobject make_copy() { barycentricinterpolant _result = new barycentricinterpolant(); _result.n = n; _result.sy = sy; _result.x = (double[])x.Clone(); _result.y = (double[])y.Clone(); _result.w = (double[])w.Clone(); return _result; } }; /************************************************************************* Rational interpolation using barycentric formula F(t) = SUM(i=0,n-1,w[i]*f[i]/(t-x[i])) / SUM(i=0,n-1,w[i]/(t-x[i])) Input parameters: B - barycentric interpolant built with one of model building subroutines. 
T - interpolation point Result: barycentric interpolant F(t) -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static double barycentriccalc(barycentricinterpolant b, double t, alglib.xparams _params) { double result = 0; double s1 = 0; double s2 = 0; double s = 0; double v = 0; int i = 0; alglib.ap.assert(!Double.IsInfinity(t), "BarycentricCalc: infinite T!"); // // special case: NaN // if( Double.IsNaN(t) ) { result = Double.NaN; return result; } // // special case: N=1 // if( b.n==1 ) { result = b.sy*b.y[0]; return result; } // // Here we assume that task is normalized, i.e.: // 1. abs(Y[i])<=1 // 2. abs(W[i])<=1 // 3. X[] is ordered // s = Math.Abs(t-b.x[0]); for(i=0; i<=b.n-1; i++) { v = b.x[i]; if( (double)(v)==(double)(t) ) { result = b.sy*b.y[i]; return result; } v = Math.Abs(t-v); if( (double)(v)<(double)(s) ) { s = v; } } s1 = 0; s2 = 0; for(i=0; i<=b.n-1; i++) { v = s/(t-b.x[i]); v = v*b.w[i]; s1 = s1+v*b.y[i]; s2 = s2+v; } result = b.sy*s1/s2; return result; } /************************************************************************* Differentiation of barycentric interpolant: first derivative. Algorithm used in this subroutine is very robust and should not fail until provided with values too close to MaxRealNumber (usually MaxRealNumber/N or greater will overflow). INPUT PARAMETERS: B - barycentric interpolant built with one of model building subroutines. T - interpolation point OUTPUT PARAMETERS: F - barycentric interpolant at T DF - first derivative NOTE -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void barycentricdiff1(barycentricinterpolant b, double t, ref double f, ref double df, alglib.xparams _params) { double v = 0; double vv = 0; int i = 0; int k = 0; double n0 = 0; double n1 = 0; double d0 = 0; double d1 = 0; double s0 = 0; double s1 = 0; double xk = 0; double xi = 0; double xmin = 0; double xmax = 0; double xscale1 = 0; double xoffs1 = 0; double xscale2 = 0; double xoffs2 = 0; double xprev = 0; f = 0; df = 0; alglib.ap.assert(!Double.IsInfinity(t), "BarycentricDiff1: infinite T!"); // // special case: NaN // if( Double.IsNaN(t) ) { f = Double.NaN; df = Double.NaN; return; } // // special case: N=1 // if( b.n==1 ) { f = b.sy*b.y[0]; df = 0; return; } if( (double)(b.sy)==(double)(0) ) { f = 0; df = 0; return; } alglib.ap.assert((double)(b.sy)>(double)(0), "BarycentricDiff1: internal error"); // // We assume than N>1 and B.SY>0. Find: // 1. pivot point (X[i] closest to T) // 2. 
width of interval containing X[i] // v = Math.Abs(b.x[0]-t); k = 0; xmin = b.x[0]; xmax = b.x[0]; for(i=1; i<=b.n-1; i++) { vv = b.x[i]; if( (double)(Math.Abs(vv-t))<(double)(v) ) { v = Math.Abs(vv-t); k = i; } xmin = Math.Min(xmin, vv); xmax = Math.Max(xmax, vv); } // // pivot point found, calculate dNumerator and dDenominator // xscale1 = 1/(xmax-xmin); xoffs1 = -(xmin/(xmax-xmin))+1; xscale2 = 2; xoffs2 = -3; t = t*xscale1+xoffs1; t = t*xscale2+xoffs2; xk = b.x[k]; xk = xk*xscale1+xoffs1; xk = xk*xscale2+xoffs2; v = t-xk; n0 = 0; n1 = 0; d0 = 0; d1 = 0; xprev = -2; for(i=0; i<=b.n-1; i++) { xi = b.x[i]; xi = xi*xscale1+xoffs1; xi = xi*xscale2+xoffs2; alglib.ap.assert((double)(xi)>(double)(xprev), "BarycentricDiff1: points are too close!"); xprev = xi; if( i!=k ) { vv = math.sqr(t-xi); s0 = (t-xk)/(t-xi); s1 = (xk-xi)/vv; } else { s0 = 1; s1 = 0; } vv = b.w[i]*b.y[i]; n0 = n0+s0*vv; n1 = n1+s1*vv; vv = b.w[i]; d0 = d0+s0*vv; d1 = d1+s1*vv; } f = b.sy*n0/d0; df = (n1*d0-n0*d1)/math.sqr(d0); if( (double)(df)!=(double)(0) ) { df = Math.Sign(df)*Math.Exp(Math.Log(Math.Abs(df))+Math.Log(b.sy)+Math.Log(xscale1)+Math.Log(xscale2)); } } /************************************************************************* Differentiation of barycentric interpolant: first/second derivatives. INPUT PARAMETERS: B - barycentric interpolant built with one of model building subroutines. T - interpolation point OUTPUT PARAMETERS: F - barycentric interpolant at T DF - first derivative D2F - second derivative NOTE: this algorithm may fail due to overflow/underflor if used on data whose values are close to MaxRealNumber or MinRealNumber. Use more robust BarycentricDiff1() subroutine in such cases. -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void barycentricdiff2(barycentricinterpolant b, double t, ref double f, ref double df, ref double d2f, alglib.xparams _params) { double v = 0; double vv = 0; int i = 0; int k = 0; double n0 = 0; double n1 = 0; double n2 = 0; double d0 = 0; double d1 = 0; double d2 = 0; double s0 = 0; double s1 = 0; double s2 = 0; double xk = 0; double xi = 0; f = 0; df = 0; d2f = 0; alglib.ap.assert(!Double.IsInfinity(t), "BarycentricDiff1: infinite T!"); // // special case: NaN // if( Double.IsNaN(t) ) { f = Double.NaN; df = Double.NaN; d2f = Double.NaN; return; } // // special case: N=1 // if( b.n==1 ) { f = b.sy*b.y[0]; df = 0; d2f = 0; return; } if( (double)(b.sy)==(double)(0) ) { f = 0; df = 0; d2f = 0; return; } // // We assume than N>1 and B.SY>0. Find: // 1. pivot point (X[i] closest to T) // 2. 
width of interval containing X[i] // alglib.ap.assert((double)(b.sy)>(double)(0), "BarycentricDiff: internal error"); f = 0; df = 0; d2f = 0; v = Math.Abs(b.x[0]-t); k = 0; for(i=1; i<=b.n-1; i++) { vv = b.x[i]; if( (double)(Math.Abs(vv-t))<(double)(v) ) { v = Math.Abs(vv-t); k = i; } } // // pivot point found, calculate dNumerator and dDenominator // xk = b.x[k]; v = t-xk; n0 = 0; n1 = 0; n2 = 0; d0 = 0; d1 = 0; d2 = 0; for(i=0; i<=b.n-1; i++) { if( i!=k ) { xi = b.x[i]; vv = math.sqr(t-xi); s0 = (t-xk)/(t-xi); s1 = (xk-xi)/vv; s2 = -(2*(xk-xi)/(vv*(t-xi))); } else { s0 = 1; s1 = 0; s2 = 0; } vv = b.w[i]*b.y[i]; n0 = n0+s0*vv; n1 = n1+s1*vv; n2 = n2+s2*vv; vv = b.w[i]; d0 = d0+s0*vv; d1 = d1+s1*vv; d2 = d2+s2*vv; } f = b.sy*n0/d0; df = b.sy*(n1*d0-n0*d1)/math.sqr(d0); d2f = b.sy*((n2*d0-n0*d2)*math.sqr(d0)-(n1*d0-n0*d1)*2*d0*d1)/math.sqr(math.sqr(d0)); } /************************************************************************* This subroutine performs linear transformation of the argument. INPUT PARAMETERS: B - rational interpolant in barycentric form CA, CB - transformation coefficients: x = CA*t + CB OUTPUT PARAMETERS: B - transformed interpolant with X replaced by T -- ALGLIB PROJECT -- Copyright 19.08.2009 by Bochkanov Sergey *************************************************************************/ public static void barycentriclintransx(barycentricinterpolant b, double ca, double cb, alglib.xparams _params) { int i = 0; int j = 0; double v = 0; // // special case, replace by constant F(CB) // if( (double)(ca)==(double)(0) ) { b.sy = barycentriccalc(b, cb, _params); v = 1; for(i=0; i<=b.n-1; i++) { b.y[i] = 1; b.w[i] = v; v = -v; } return; } // // general case: CA<>0 // for(i=0; i<=b.n-1; i++) { b.x[i] = (b.x[i]-cb)/ca; } if( (double)(ca)<(double)(0) ) { for(i=0; i<=b.n-1; i++) { if( i<b.n-1-i ) { j = b.n-1-i; v = b.x[i]; b.x[i] = b.x[j]; b.x[j] = v; v = b.y[i]; b.y[i] = b.y[j]; b.y[j] = v; v = b.w[i]; b.w[i] = b.w[j]; b.w[j] = v; } else { break; } } } } /************************************************************************* This subroutine performs linear transformation of the barycentric interpolant. INPUT PARAMETERS: B - rational interpolant in barycentric form CA, CB - transformation coefficients: B2(t) = CA*B(t) + CB OUTPUT PARAMETERS: B - transformed interpolant -- ALGLIB PROJECT -- Copyright 19.08.2009 by Bochkanov Sergey *************************************************************************/ public static void barycentriclintransy(barycentricinterpolant b, double ca, double cb, alglib.xparams _params) { int i = 0; double v = 0; int i_ = 0; for(i=0; i<=b.n-1; i++) { b.y[i] = ca*b.sy*b.y[i]+cb; } b.sy = 0; for(i=0; i<=b.n-1; i++) { b.sy = Math.Max(b.sy, Math.Abs(b.y[i])); } if( (double)(b.sy)>(double)(0) ) { v = 1/b.sy; for(i_=0; i_<=b.n-1;i_++) { b.y[i_] = v*b.y[i_]; } } } /************************************************************************* Extracts X/Y/W arrays from rational interpolant INPUT PARAMETERS: B - barycentric interpolant OUTPUT PARAMETERS: N - nodes count, N>0 X - interpolation nodes, array[0..N-1] F - function values, array[0..N-1] W - barycentric weights, array[0..N-1] -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void barycentricunpack(barycentricinterpolant b, ref int n, ref double[] x, ref double[] y, ref double[] w, alglib.xparams _params) { double v = 0; int i_ = 0; n = 0; x = new double[0]; y = new double[0]; w = new double[0]; n = b.n; x = new double[n]; y = new double[n]; w = new double[n]; v = b.sy; for(i_=0; i_<=n-1;i_++) { x[i_] = b.x[i_]; } for(i_=0; i_<=n-1;i_++) { y[i_] = v*b.y[i_]; } for(i_=0; i_<=n-1;i_++) { w[i_] = b.w[i_]; } } /************************************************************************* Rational interpolant from X/Y/W arrays F(t) = SUM(i=0,n-1,w[i]*f[i]/(t-x[i])) / SUM(i=0,n-1,w[i]/(t-x[i])) INPUT PARAMETERS: X - interpolation nodes, array[0..N-1] F - function values, array[0..N-1] W - barycentric weights, array[0..N-1] N - nodes count, N>0 OUTPUT PARAMETERS: B - barycentric interpolant built from (X, Y, W) -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void barycentricbuildxyw(double[] x, double[] y, double[] w, int n, barycentricinterpolant b, alglib.xparams _params) { int i_ = 0; alglib.ap.assert(n>0, "BarycentricBuildXYW: incorrect N!"); // //
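// The arrays are copied verbatim below; barycentricnormalize() then rescales Y[]
// to unit magnitude (storing the scale in B.SY), rescales W[] likewise, and sorts
// the nodes by X, so the resulting interpolant can be evaluated directly with
// barycentriccalc().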
fill X/Y/W // b.x = new double[n]; b.y = new double[n]; b.w = new double[n]; for(i_=0; i_<=n-1;i_++) { b.x[i_] = x[i_]; } for(i_=0; i_<=n-1;i_++) { b.y[i_] = y[i_]; } for(i_=0; i_<=n-1;i_++) { b.w[i_] = w[i_]; } b.n = n; // // Normalize // barycentricnormalize(b, _params); } /************************************************************************* Rational interpolant without poles The subroutine constructs the rational interpolating function without real poles (see 'Barycentric rational interpolation with no poles and high rates of approximation', Michael S. Floater. and Kai Hormann, for more information on this subject). Input parameters: X - interpolation nodes, array[0..N-1]. Y - function values, array[0..N-1]. N - number of nodes, N>0. D - order of the interpolation scheme, 0 <= D <= N-1. D<0 will cause an error. D>=N it will be replaced with D=N-1. if you don't know what D to choose, use small value about 3-5. Output parameters: B - barycentric interpolant. Note: this algorithm always succeeds and calculates the weights with close to machine precision. -- ALGLIB PROJECT -- Copyright 17.06.2007 by Bochkanov Sergey *************************************************************************/ public static void barycentricbuildfloaterhormann(double[] x, double[] y, int n, int d, barycentricinterpolant b, alglib.xparams _params) { double s0 = 0; double s = 0; double v = 0; int i = 0; int j = 0; int k = 0; int[] perm = new int[0]; double[] wtemp = new double[0]; double[] sortrbuf = new double[0]; double[] sortrbuf2 = new double[0]; int i_ = 0; alglib.ap.assert(n>0, "BarycentricFloaterHormann: N<=0!"); alglib.ap.assert(d>=0, "BarycentricFloaterHormann: incorrect D!"); // // Prepare // if( d>n-1 ) { d = n-1; } b.n = n; // // special case: N=1 // if( n==1 ) { b.x = new double[n]; b.y = new double[n]; b.w = new double[n]; b.x[0] = x[0]; b.y[0] = y[0]; b.w[0] = 1; barycentricnormalize(b, _params); return; } // // Fill X/Y // b.x = new double[n]; b.y = new double[n]; for(i_=0; i_<=n-1;i_++) { b.x[i_] = x[i_]; } for(i_=0; i_<=n-1;i_++) { b.y[i_] = y[i_]; } tsort.tagsortfastr(ref b.x, ref b.y, ref sortrbuf, ref sortrbuf2, n, _params); // // Calculate Wk // b.w = new double[n]; s0 = 1; for(k=1; k<=d; k++) { s0 = -s0; } for(k=0; k<=n-1; k++) { // // Wk // s = 0; for(i=Math.Max(k-d, 0); i<=Math.Min(k, n-1-d); i++) { v = 1; for(j=i; j<=i+d; j++) { if( j!=k ) { v = v/Math.Abs(b.x[k]-b.x[j]); } } s = s+v; } b.w[k] = s0*s; // // Next S0 // s0 = -s0; } // // Normalize // barycentricnormalize(b, _params); } /************************************************************************* Copying of the barycentric interpolant (for internal use only) INPUT PARAMETERS: B - barycentric interpolant OUTPUT PARAMETERS: B2 - copy(B1) -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void barycentriccopy(barycentricinterpolant b, barycentricinterpolant b2, alglib.xparams _params) { int i_ = 0; b2.n = b.n; b2.sy = b.sy; b2.x = new double[b2.n]; b2.y = new double[b2.n]; b2.w = new double[b2.n]; for(i_=0; i_<=b2.n-1;i_++) { b2.x[i_] = b.x[i_]; } for(i_=0; i_<=b2.n-1;i_++) { b2.y[i_] = b.y[i_]; } for(i_=0; i_<=b2.n-1;i_++) { b2.w[i_] = b.w[i_]; } } /************************************************************************* Normalization of barycentric interpolant: * B.N, B.X, B.Y and B.W are initialized * B.SY is NOT initialized * Y[] is normalized, scaling coefficient is stored in B.SY * W[] is normalized, no scaling coefficient is 
stored * X[] is sorted Internal subroutine. *************************************************************************/ private static void barycentricnormalize(barycentricinterpolant b, alglib.xparams _params) { int[] p1 = new int[0]; int[] p2 = new int[0]; int i = 0; int j = 0; int j2 = 0; double v = 0; int i_ = 0; // // Normalize task: |Y|<=1, |W|<=1, sort X[] // b.sy = 0; for(i=0; i<=b.n-1; i++) { b.sy = Math.Max(b.sy, Math.Abs(b.y[i])); } if( (double)(b.sy)>(double)(0) && (double)(Math.Abs(b.sy-1))>(double)(10*math.machineepsilon) ) { v = 1/b.sy; for(i_=0; i_<=b.n-1;i_++) { b.y[i_] = v*b.y[i_]; } } v = 0; for(i=0; i<=b.n-1; i++) { v = Math.Max(v, Math.Abs(b.w[i])); } if( (double)(v)>(double)(0) && (double)(Math.Abs(v-1))>(double)(10*math.machineepsilon) ) { v = 1/v; for(i_=0; i_<=b.n-1;i_++) { b.w[i_] = v*b.w[i_]; } } for(i=0; i<=b.n-2; i++) { if( (double)(b.x[i+1])<(double)(b.x[i]) ) { tsort.tagsort(ref b.x, b.n, ref p1, ref p2, _params); for(j=0; j<=b.n-1; j++) { j2 = p2[j]; v = b.y[j]; b.y[j] = b.y[j2]; b.y[j2] = v; v = b.w[j]; b.w[j] = b.w[j2]; b.w[j2] = v; } break; } } } } public class fitsphere { public class fitsphereinternalreport : apobject { public int nfev; public int iterationscount; public fitsphereinternalreport() { init(); } public override void init() { } public override alglib.apobject make_copy() { fitsphereinternalreport _result = new fitsphereinternalreport(); _result.nfev = nfev; _result.iterationscount = iterationscount; return _result; } }; /************************************************************************* Fits least squares (LS) circle (or NX-dimensional sphere) to data (a set of points in NX-dimensional space). Least squares circle minimizes sum of squared deviations between distances from points to the center and some "candidate" radius, which is also fitted to the data. INPUT PARAMETERS: XY - array[NPoints,NX] (or larger), contains dataset. One row = one point in NX-dimensional space. NPoints - dataset size, NPoints>0 NX - space dimensionality, NX>0 (1, 2, 3, 4, 5 and so on) OUTPUT PARAMETERS: CX - central point for a sphere R - radius -- ALGLIB -- Copyright 07.05.2018 by Bochkanov Sergey *************************************************************************/ public static void fitspherels(double[,] xy, int npoints, int nx, ref double[] cx, ref double r, alglib.xparams _params) { double dummy = 0; cx = new double[0]; r = 0; fitspherex(xy, npoints, nx, 0, 0.0, 0, 0.0, ref cx, ref dummy, ref r, _params); } /************************************************************************* Fits minimum circumscribed (MC) circle (or NX-dimensional sphere) to data (a set of points in NX-dimensional space). INPUT PARAMETERS: XY - array[NPoints,NX] (or larger), contains dataset. One row = one point in NX-dimensional space. NPoints - dataset size, NPoints>0 NX - space dimensionality, NX>0 (1, 2, 3, 4, 5 and so on) OUTPUT PARAMETERS: CX - central point for a sphere RHi - radius NOTE: this function is an easy-to-use wrapper around more powerful "expert" function fitspherex(). This wrapper is optimized for ease of use and stability - at the cost of somewhat lower performance (we have to use very tight stopping criteria for inner optimizer because we want to make sure that it will converge on any dataset). If you are ready to experiment with settings of "expert" function, you can achieve ~2-4x speedup over standard "bulletproof" settings. 
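NOTE: for intuition, the value being minimized is the circumscribed radius of a
      candidate center. The self-contained sketch below (illustrative only, not
      part of the library) computes that radius for a fixed center C; fitspheremc()
      searches for the center which makes it as small as possible:

          // assumes 'using System;'
          static double CircumscribedRadius(double[,] xy, int npoints, int nx, double[] c)
          {
              double rhi = 0;
              for(int i=0; i<npoints; i++)
              {
                  double v = 0;
                  for(int j=0; j<nx; j++)
                      v += (xy[i,j]-c[j])*(xy[i,j]-c[j]);
                  rhi = Math.Max(rhi, Math.Sqrt(v));   // largest distance from C to a point
              }
              return rhi;
          }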
-- ALGLIB -- Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ public static void fitspheremc(double[,] xy, int npoints, int nx, ref double[] cx, ref double rhi, alglib.xparams _params) { double dummy = 0; cx = new double[0]; rhi = 0; fitspherex(xy, npoints, nx, 1, 0.0, 0, 0.0, ref cx, ref dummy, ref rhi, _params); } /************************************************************************* Fits maximum inscribed circle (or NX-dimensional sphere) to data (a set of points in NX-dimensional space). INPUT PARAMETERS: XY - array[NPoints,NX] (or larger), contains dataset. One row = one point in NX-dimensional space. NPoints - dataset size, NPoints>0 NX - space dimensionality, NX>0 (1, 2, 3, 4, 5 and so on) OUTPUT PARAMETERS: CX - central point for a sphere RLo - radius NOTE: this function is an easy-to-use wrapper around more powerful "expert" function fitspherex(). This wrapper is optimized for ease of use and stability - at the cost of somewhat lower performance (we have to use very tight stopping criteria for inner optimizer because we want to make sure that it will converge on any dataset). If you are ready to experiment with settings of "expert" function, you can achieve ~2-4x speedup over standard "bulletproof" settings. -- ALGLIB -- Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ public static void fitspheremi(double[,] xy, int npoints, int nx, ref double[] cx, ref double rlo, alglib.xparams _params) { double dummy = 0; cx = new double[0]; rlo = 0; fitspherex(xy, npoints, nx, 2, 0.0, 0, 0.0, ref cx, ref rlo, ref dummy, _params); } /************************************************************************* Fits minimum zone circle (or NX-dimensional sphere) to data (a set of points in NX-dimensional space). INPUT PARAMETERS: XY - array[NPoints,NX] (or larger), contains dataset. One row = one point in NX-dimensional space. NPoints - dataset size, NPoints>0 NX - space dimensionality, NX>0 (1, 2, 3, 4, 5 and so on) OUTPUT PARAMETERS: CX - central point for a sphere RLo - radius of inscribed circle RHo - radius of circumscribed circle NOTE: this function is an easy-to-use wrapper around more powerful "expert" function fitspherex(). This wrapper is optimized for ease of use and stability - at the cost of somewhat lower performance (we have to use very tight stopping criteria for inner optimizer because we want to make sure that it will converge on any dataset). If you are ready to experiment with settings of "expert" function, you can achieve ~2-4x speedup over standard "bulletproof" settings. -- ALGLIB -- Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ public static void fitspheremz(double[,] xy, int npoints, int nx, ref double[] cx, ref double rlo, ref double rhi, alglib.xparams _params) { cx = new double[0]; rlo = 0; rhi = 0; fitspherex(xy, npoints, nx, 3, 0.0, 0, 0.0, ref cx, ref rlo, ref rhi, _params); } /************************************************************************* Fitting minimum circumscribed, maximum inscribed or minimum zone circles (or NX-dimensional spheres) to data (a set of points in NX-dimensional space). 
This is expert function which allows to tweak many parameters of underlying nonlinear solver: * stopping criteria for inner iterations * number of outer iterations * penalty coefficient used to handle nonlinear constraints (we convert unconstrained nonsmooth optimization problem ivolving max() and/or min() operations to quadratically constrained smooth one). You may tweak all these parameters or only some of them, leaving other ones at their default state - just specify zero value, and solver will fill it with appropriate default one. These comments also include some discussion of approach used to handle such unusual fitting problem, its stability, drawbacks of alternative methods, and convergence properties. INPUT PARAMETERS: XY - array[NPoints,NX] (or larger), contains dataset. One row = one point in NX-dimensional space. NPoints - dataset size, NPoints>0 NX - space dimensionality, NX>0 (1, 2, 3, 4, 5 and so on) ProblemType-used to encode problem type: * 0 for least squares circle * 1 for minimum circumscribed circle/sphere fitting (MC) * 2 for maximum inscribed circle/sphere fitting (MI) * 3 for minimum zone circle fitting (difference between Rhi and Rlo is minimized), denoted as MZ EpsX - stopping condition for NLC optimizer: * must be non-negative * use 0 to choose default value (1.0E-12 is used by default) * you may specify larger values, up to 1.0E-6, if you want to speed-up solver; NLC solver performs several preconditioned outer iterations, so final result typically has precision much better than EpsX. AULIts - number of outer iterations performed by NLC optimizer: * must be non-negative * use 0 to choose default value (20 is used by default) * you may specify values smaller than 20 if you want to speed up solver; 10 often results in good combination of precision and speed; sometimes you may get good results with just 6 outer iterations. Ignored for ProblemType=0. Penalty - penalty coefficient for NLC optimizer: * must be non-negative * use 0 to choose default value (1.0E6 in current version) * it should be really large, 1.0E6...1.0E7 is a good value to start from; * generally, default value is good enough Ignored for ProblemType=0. OUTPUT PARAMETERS: CX - central point for a sphere RLo - radius: * for ProblemType=2,3, radius of the inscribed sphere * for ProblemType=0 - radius of the least squares sphere * for ProblemType=1 - zero RHo - radius: * for ProblemType=1,3, radius of the circumscribed sphere * for ProblemType=0 - radius of the least squares sphere * for ProblemType=2 - zero NOTE: ON THE UNIQUENESS OF SOLUTIONS ALGLIB provides solution to several related circle fitting problems: MC (minimum circumscribed), MI (maximum inscribed) and MZ (minimum zone) fitting, LS (least squares) fitting. It is important to note that among these problems only MC and LS are convex and have unique solution independently from starting point. As for MI, it may (or may not, depending on dataset properties) have multiple solutions, and it always has one degenerate solution C=infinity which corresponds to infinitely large radius. Thus, there are no guarantees that solution to MI returned by this solver will be the best one (and no one can provide you with such guarantee because problem is NP-hard). The only guarantee you have is that this solution is locally optimal, i.e. it can not be improved by infinitesimally small tweaks in the parameters. 
It is also possible to "run away" to infinity when started from bad initial point located outside of point cloud (or when point cloud does not span entire circumference/surface of the sphere). Finally, MZ (minimum zone circle) stands somewhere between MC and MI in stability. It is somewhat regularized by "circumscribed" term of the merit function; however, solutions to MZ may be non-unique, and in some unlucky cases it is also possible to "run away to infinity".

NOTE: ON THE NONLINEARLY CONSTRAINED PROGRAMMING APPROACH

The problem formulation for MC (minimum circumscribed circle; for the sake of simplicity we omit MZ and MI here) is:

    min(C) max(i) |XY[i]-C|^2

i.e. it is an unconstrained nonsmooth optimization problem of finding the "best" central point, with radius R being unambiguously determined from C. In order to move away from non-smoothness we use the following reformulation:

    min(C,R) R    subject to    R>=0,  |XY[i]-C|^2 <= R^2  for all i

i.e. it becomes a smooth quadratically constrained optimization problem with a linear target function. Such problem statement is 100% equivalent to the original nonsmooth one, but much easier to approach. We solve it with MinNLC solver provided by ALGLIB.

NOTE: ON INSTABILITY OF SEQUENTIAL LINEARIZATION APPROACH

ALGLIB has nonlinearly constrained solver which proved to be stable on such problems. However, some authors proposed to linearize constraints in the vicinity of current approximation (Ci,Ri) and to get next approximate solution (Ci+1,Ri+1) as solution to linear programming problem. Obviously, LP problems are easier than nonlinearly constrained ones. Indeed, such approach to MC/MI/MZ resulted in ~10-20x increase in performance (when compared with NLC solver). However, it turned out that in some cases linearized model fails to predict correct direction for next step and tells us that we converged to solution even when we are still 2-4 digits of precision away from it. It is important that it is not a failure of the LP solver - it is a failure of the linear model; even when solved exactly, it fails to handle subtle nonlinearities which arise near the solution. We validated it by comparing results returned by ALGLIB linear solver with that of MATLAB. In our experiments with linearization: * MC failed most often, at both realistic and synthetic datasets * MI sometimes failed, but sometimes succeeded * MZ often succeeded; our guess is that presence of two independent sets of constraints (one set for Rlo and another one for Rhi) and two terms in the target function (Rlo and Rhi) regularizes task, so when linear model fails to handle nonlinearities from Rlo, it uses Rhi as a hint (and vice versa). Because linearization approach failed to achieve stable results, we do not include it in ALGLIB.
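To make the quadratically constrained reformulation used by this solver concrete: at a feasible pair (C,RHi) every point must satisfy g_i(C,RHi) = |XY[i]-C|^2 - RHi^2 <= 0, and the solver simply minimizes RHi over such pairs. The sketch below (illustrative only, not library code) evaluates one such constraint residual:

          // assumes 'using System;' - returns a non-positive value iff point i
          // lies inside (or on) the candidate circumscribed sphere (c, rhi)
          static double McConstraint(double[,] xy, int i, int nx, double[] c, double rhi)
          {
              double v = 0;
              for(int j=0; j<nx; j++)
                  v += (xy[i,j]-c[j])*(xy[i,j]-c[j]);
              return v - rhi*rhi;
          }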
-- ALGLIB -- Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ public static void fitspherex(double[,] xy, int npoints, int nx, int problemtype, double epsx, int aulits, double penalty, ref double[] cx, ref double rlo, ref double rhi, alglib.xparams _params) { fitsphereinternalreport rep = new fitsphereinternalreport(); cx = new double[0]; rlo = 0; rhi = 0; alglib.ap.assert(math.isfinite(penalty) && (double)(penalty)>=(double)(0), "FitSphereX: Penalty<0 or is not finite"); alglib.ap.assert(math.isfinite(epsx) && (double)(epsx)>=(double)(0), "FitSphereX: EpsX<0 or is not finite"); alglib.ap.assert(aulits>=0, "FitSphereX: AULIts<0"); fitsphereinternal(xy, npoints, nx, problemtype, 0, epsx, aulits, penalty, ref cx, ref rlo, ref rhi, rep, _params); } /************************************************************************* Fitting minimum circumscribed, maximum inscribed or minimum zone circles (or NX-dimensional spheres) to data (a set of points in NX-dimensional space). Internal computational function. INPUT PARAMETERS: XY - array[NPoints,NX] (or larger), contains dataset. One row = one point in NX-dimensional space. NPoints - dataset size, NPoints>0 NX - space dimensionality, NX>0 (1, 2, 3, 4, 5 and so on) ProblemType-used to encode problem type: * 0 for least squares circle * 1 for minimum circumscribed circle/sphere fitting (MC) * 2 for maximum inscribed circle/sphere fitting (MI) * 3 for minimum zone circle fitting (difference between Rhi and Rlo is minimized), denoted as MZ SolverType- solver to use: * 0 use best solver available (1 in current version) * 1 use nonlinearly constrained optimization approach, AUL (it is roughly 10-20 times slower than SPC-LIN, but much more stable) * 2 use special fast IMPRECISE solver, SPC-LIN sequential linearization approach; SPC-LIN is fast, but sometimes fails to converge with more than 3 digits of precision; see comments below. NOT RECOMMENDED UNLESS YOU REALLY NEED HIGH PERFORMANCE AT THE COST OF SOME PRECISION. * 3 use nonlinearly constrained optimization approach, SLP (most robust one, but somewhat slower than AUL) Ignored for ProblemType=0. EpsX - stopping criteria for SLP and NLC optimizers: * must be non-negative * use 0 to choose default value (1.0E-12 is used by default) * if you use SLP solver, you should use default values * if you use NLC solver, you may specify larger values, up to 1.0E-6, if you want to speed-up solver; NLC solver performs several preconditioned outer iterations, so final result typically has precision much better than EpsX. AULIts - number of iterations performed by NLC optimizer: * must be non-negative * use 0 to choose default value (20 is used by default) * you may specify values smaller than 20 if you want to speed up solver; 10 often results in good combination of precision and speed Ignored for ProblemType=0. Penalty - penalty coefficient for NLC optimizer (ignored for SLP): * must be non-negative * use 0 to choose default value (1.0E6 in current version) * it should be really large, 1.0E6...1.0E7 is a good value to start from; * generally, default value is good enough * ignored by SLP optimizer Ignored for ProblemType=0. 
OUTPUT PARAMETERS: CX - central point for a sphere RLo - radius: * for ProblemType=2,3, radius of the inscribed sphere * for ProblemType=0 - radius of the least squares sphere * for ProblemType=1 - zero RHo - radius: * for ProblemType=1,3, radius of the circumscribed sphere * for ProblemType=0 - radius of the least squares sphere * for ProblemType=2 - zero -- ALGLIB -- Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ public static void fitsphereinternal(double[,] xy, int npoints, int nx, int problemtype, int solvertype, double epsx, int aulits, double penalty, ref double[] cx, ref double rlo, ref double rhi, fitsphereinternalreport rep, alglib.xparams _params) { int i = 0; int j = 0; double v = 0; double vv = 0; int cpr = 0; bool userlo = new bool(); bool userhi = new bool(); double vlo = 0; double vhi = 0; double[] vmin = new double[0]; double[] vmax = new double[0]; double spread = 0; double[] pcr = new double[0]; double[] scr = new double[0]; double[] bl = new double[0]; double[] bu = new double[0]; int suboffset = 0; int dstrow = 0; minnlc.minnlcstate nlcstate = new minnlc.minnlcstate(); minnlc.minnlcreport nlcrep = new minnlc.minnlcreport(); double[,] cmatrix = new double[0,0]; int[] ct = new int[0]; int outeridx = 0; int maxouterits = 0; int maxits = 0; double safeguard = 0; double bi = 0; minbleic.minbleicstate blcstate = new minbleic.minbleicstate(); minbleic.minbleicreport blcrep = new minbleic.minbleicreport(); double[] prevc = new double[0]; minlm.minlmstate lmstate = new minlm.minlmstate(); minlm.minlmreport lmrep = new minlm.minlmreport(); cx = new double[0]; rlo = 0; rhi = 0; // // Check input parameters // alglib.ap.assert(npoints>0, "FitSphereX: NPoints<=0"); alglib.ap.assert(nx>0, "FitSphereX: NX<=0"); alglib.ap.assert(apserv.apservisfinitematrix(xy, npoints, nx, _params), "FitSphereX: XY contains infinite or NAN values"); alglib.ap.assert(problemtype>=0 && problemtype<=3, "FitSphereX: ProblemType is neither 0, 1, 2 or 3"); alglib.ap.assert(solvertype>=0 && solvertype<=3, "FitSphereX: ProblemType is neither 1, 2 or 3"); alglib.ap.assert(math.isfinite(penalty) && (double)(penalty)>=(double)(0), "FitSphereX: Penalty<0 or is not finite"); alglib.ap.assert(math.isfinite(epsx) && (double)(epsx)>=(double)(0), "FitSphereX: EpsX<0 or is not finite"); alglib.ap.assert(aulits>=0, "FitSphereX: AULIts<0"); if( solvertype==0 ) { solvertype = 1; } if( (double)(penalty)==(double)(0) ) { penalty = 1.0E6; } if( (double)(epsx)==(double)(0) ) { epsx = 1.0E-12; } if( aulits==0 ) { aulits = 20; } safeguard = 10; maxouterits = 10; maxits = 10000; rep.nfev = 0; rep.iterationscount = 0; // // Determine initial values, initial estimates and spread of the points // vmin = new double[nx]; vmax = new double[nx]; cx = new double[nx]; for(j=0; j<=nx-1; j++) { vmin[j] = xy[0,j]; vmax[j] = xy[0,j]; cx[j] = 0; } for(i=0; i<=npoints-1; i++) { for(j=0; j<=nx-1; j++) { cx[j] = cx[j]+xy[i,j]; vmin[j] = Math.Min(vmin[j], xy[i,j]); vmax[j] = Math.Max(vmax[j], xy[i,j]); } } spread = 0; for(j=0; j<=nx-1; j++) { cx[j] = cx[j]/npoints; spread = Math.Max(spread, vmax[j]-vmin[j]); } rlo = math.maxrealnumber; rhi = 0; for(i=0; i<=npoints-1; i++) { v = 0; for(j=0; j<=nx-1; j++) { v = v+math.sqr(xy[i,j]-cx[j]); } v = Math.Sqrt(v); rhi = Math.Max(rhi, v); rlo = Math.Min(rlo, v); } // // Handle degenerate case of zero spread // if( (double)(spread)==(double)(0) ) { for(j=0; j<=nx-1; j++) { cx[j] = vmin[j]; } rhi = 0; rlo = 0; return; } // // Prepare 
initial point for optimizer, scale vector and box constraints // pcr = new double[nx+2]; scr = new double[nx+2]; bl = new double[nx+2]; bu = new double[nx+2]; for(j=0; j<=nx-1; j++) { pcr[j] = cx[j]; scr[j] = 0.1*spread; bl[j] = cx[j]-safeguard*spread; bu[j] = cx[j]+safeguard*spread; } pcr[nx+0] = rlo; pcr[nx+1] = rhi; scr[nx+0] = 0.5*spread; scr[nx+1] = 0.5*spread; bl[nx+0] = 0; bl[nx+1] = 0; bu[nx+0] = safeguard*rhi; bu[nx+1] = safeguard*rhi; // // First branch: least squares fitting vs MI/MC/MZ fitting // if( problemtype==0 ) { // // Solve problem with Levenberg-Marquardt algorithm // pcr[nx] = rhi; minlm.minlmcreatevj(nx+1, npoints, pcr, lmstate, _params); minlm.minlmsetscale(lmstate, scr, _params); minlm.minlmsetbc(lmstate, bl, bu, _params); minlm.minlmsetcond(lmstate, epsx, maxits, _params); while( minlm.minlmiteration(lmstate, _params) ) { if( lmstate.needfij || lmstate.needfi ) { apserv.inc(ref rep.nfev, _params); for(i=0; i<=npoints-1; i++) { v = 0; for(j=0; j<=nx-1; j++) { v = v+math.sqr(lmstate.x[j]-xy[i,j]); } lmstate.fi[i] = Math.Sqrt(v)-lmstate.x[nx]; if( lmstate.needfij ) { for(j=0; j<=nx-1; j++) { lmstate.j[i,j] = 0.5/(1.0E-9*spread+Math.Sqrt(v))*2*(lmstate.x[j]-xy[i,j]); } lmstate.j[i,nx] = -1; } } continue; } alglib.ap.assert(false); } minlm.minlmresults(lmstate, ref pcr, lmrep, _params); alglib.ap.assert(lmrep.terminationtype>0, "FitSphereX: unexpected failure of LM solver"); rep.iterationscount = rep.iterationscount+lmrep.iterationscount; // // Offload center coordinates from PCR to CX, // re-calculate exact value of RLo/RHi using CX. // for(j=0; j<=nx-1; j++) { cx[j] = pcr[j]; } vv = 0; for(i=0; i<=npoints-1; i++) { v = 0; for(j=0; j<=nx-1; j++) { v = v+math.sqr(xy[i,j]-cx[j]); } v = Math.Sqrt(v); vv = vv+v/npoints; } rlo = vv; rhi = vv; } else { // // MI, MC, MZ fitting. // Prepare problem metrics // userlo = problemtype==2 || problemtype==3; userhi = problemtype==1 || problemtype==3; if( userlo && userhi ) { cpr = 2; } else { cpr = 1; } if( userlo ) { vlo = 1; } else { vlo = 0; } if( userhi ) { vhi = 1; } else { vhi = 0; } // // Solve with NLC solver; problem is treated as general nonlinearly constrained // programming, with augmented Lagrangian solver or SLP being used. 
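// Structure of the merit vector/Jacobian reported to MinNLC in this branch:
//   * FI[0] is the linear target VHi*Rhi-VLo*Rlo (Rlo enters with a minus sign because
//     the inscribed radius is maximized),
//   * the following CPR*NPoints entries are nonlinear "<=0" constraints, up to CPR per
//     point: |XY[i]-C|^2-Rhi^2 <= 0 when UserHi is set, Rlo^2-|XY[i]-C|^2 <= 0 when
//     UserLo is set,
//   * each row of J holds the corresponding gradient with respect to (C, Rlo, Rhi).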
// if( solvertype==1 || solvertype==3 ) { minnlc.minnlccreate(nx+2, pcr, nlcstate, _params); minnlc.minnlcsetscale(nlcstate, scr, _params); minnlc.minnlcsetbc(nlcstate, bl, bu, _params); minnlc.minnlcsetnlc(nlcstate, 0, cpr*npoints, _params); minnlc.minnlcsetcond(nlcstate, epsx, maxits, _params); minnlc.minnlcsetprecexactrobust(nlcstate, 5, _params); minnlc.minnlcsetstpmax(nlcstate, 0.1, _params); if( solvertype==1 ) { minnlc.minnlcsetalgoaul(nlcstate, penalty, aulits, _params); } else { minnlc.minnlcsetalgoslp(nlcstate, _params); } minnlc.minnlcrestartfrom(nlcstate, pcr, _params); while( minnlc.minnlciteration(nlcstate, _params) ) { if( nlcstate.needfij ) { apserv.inc(ref rep.nfev, _params); nlcstate.fi[0] = vhi*nlcstate.x[nx+1]-vlo*nlcstate.x[nx+0]; for(j=0; j<=nx-1; j++) { nlcstate.j[0,j] = 0; } nlcstate.j[0,nx+0] = -(1*vlo); nlcstate.j[0,nx+1] = 1*vhi; for(i=0; i<=npoints-1; i++) { suboffset = 0; if( userhi ) { dstrow = 1+cpr*i+suboffset; v = 0; for(j=0; j<=nx-1; j++) { vv = nlcstate.x[j]-xy[i,j]; v = v+vv*vv; nlcstate.j[dstrow,j] = 2*vv; } vv = nlcstate.x[nx+1]; v = v-vv*vv; nlcstate.j[dstrow,nx+0] = 0; nlcstate.j[dstrow,nx+1] = -(2*vv); nlcstate.fi[dstrow] = v; apserv.inc(ref suboffset, _params); } if( userlo ) { dstrow = 1+cpr*i+suboffset; v = 0; for(j=0; j<=nx-1; j++) { vv = nlcstate.x[j]-xy[i,j]; v = v-vv*vv; nlcstate.j[dstrow,j] = -(2*vv); } vv = nlcstate.x[nx+0]; v = v+vv*vv; nlcstate.j[dstrow,nx+0] = 2*vv; nlcstate.j[dstrow,nx+1] = 0; nlcstate.fi[dstrow] = v; apserv.inc(ref suboffset, _params); } alglib.ap.assert(suboffset==cpr); } continue; } alglib.ap.assert(false); } minnlc.minnlcresults(nlcstate, ref pcr, nlcrep, _params); alglib.ap.assert(nlcrep.terminationtype>0, "FitSphereX: unexpected failure of NLC solver"); rep.iterationscount = rep.iterationscount+nlcrep.iterationscount; // // Offload center coordinates from PCR to CX, // re-calculate exact value of RLo/RHi using CX. // for(j=0; j<=nx-1; j++) { cx[j] = pcr[j]; } rlo = math.maxrealnumber; rhi = 0; for(i=0; i<=npoints-1; i++) { v = 0; for(j=0; j<=nx-1; j++) { v = v+math.sqr(xy[i,j]-cx[j]); } v = Math.Sqrt(v); rhi = Math.Max(rhi, v); rlo = Math.Min(rlo, v); } if( !userlo ) { rlo = 0; } if( !userhi ) { rhi = 0; } return; } // // Solve problem with SLP (sequential LP) approach; this approach // is much faster than NLP, but often fails for MI and MC (for MZ // it performs well enough). // // REFERENCE: "On a sequential linear programming approach to finding // the smallest circumscribed, largest inscribed, and minimum // zone circle or sphere", Helmuth Spath and G.A.Watson // if( solvertype==2 ) { cmatrix = new double[cpr*npoints, nx+3]; ct = new int[cpr*npoints]; prevc = new double[nx]; minbleic.minbleiccreate(nx+2, pcr, blcstate, _params); minbleic.minbleicsetscale(blcstate, scr, _params); minbleic.minbleicsetbc(blcstate, bl, bu, _params); minbleic.minbleicsetcond(blcstate, 0, 0, epsx, maxits, _params); for(outeridx=0; outeridx<=maxouterits-1; outeridx++) { // // Prepare initial point for algorithm; center coordinates at // PCR are used to calculate RLo/RHi and update PCR with them. 
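// Each outer pass then linearizes the quadratic constraints around the current
// center stored in PCR and solves the resulting LP-like subproblem with MinBLEIC:
//   * CMatrix has CPR*NPoints rows and NX+3 columns (NX linearized center terms,
//     one column for Rlo, one for Rhi, and the right-hand side in the last column),
//   * CT[row]=-1 marks every row as a "<=" constraint for minbleicsetlc(),
//   * iterations stop early once the center moves by less than EpsX between passes.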
// rlo = math.maxrealnumber; rhi = 0; for(i=0; i<=npoints-1; i++) { v = 0; for(j=0; j<=nx-1; j++) { v = v+math.sqr(xy[i,j]-pcr[j]); } v = Math.Sqrt(v); rhi = Math.Max(rhi, v); rlo = Math.Min(rlo, v); } pcr[nx+0] = rlo*0.99999; pcr[nx+1] = rhi/0.99999; // // Generate matrix of linear constraints // for(i=0; i<=npoints-1; i++) { v = 0; for(j=0; j<=nx-1; j++) { v = v+math.sqr(xy[i,j]); } bi = -(v/2); suboffset = 0; if( userhi ) { dstrow = cpr*i+suboffset; for(j=0; j<=nx-1; j++) { cmatrix[dstrow,j] = pcr[j]/2-xy[i,j]; } cmatrix[dstrow,nx+0] = 0; cmatrix[dstrow,nx+1] = -(rhi/2); cmatrix[dstrow,nx+2] = bi; ct[dstrow] = -1; apserv.inc(ref suboffset, _params); } if( userlo ) { dstrow = cpr*i+suboffset; for(j=0; j<=nx-1; j++) { cmatrix[dstrow,j] = -(pcr[j]/2-xy[i,j]); } cmatrix[dstrow,nx+0] = rlo/2; cmatrix[dstrow,nx+1] = 0; cmatrix[dstrow,nx+2] = -bi; ct[dstrow] = -1; apserv.inc(ref suboffset, _params); } alglib.ap.assert(suboffset==cpr); } // // Solve LP subproblem with MinBLEIC // for(j=0; j<=nx-1; j++) { prevc[j] = pcr[j]; } minbleic.minbleicsetlc(blcstate, cmatrix, ct, cpr*npoints, _params); minbleic.minbleicrestartfrom(blcstate, pcr, _params); while( minbleic.minbleiciteration(blcstate, _params) ) { if( blcstate.needfg ) { apserv.inc(ref rep.nfev, _params); blcstate.f = vhi*blcstate.x[nx+1]-vlo*blcstate.x[nx+0]; for(j=0; j<=nx-1; j++) { blcstate.g[j] = 0; } blcstate.g[nx+0] = -(1*vlo); blcstate.g[nx+1] = 1*vhi; continue; } } minbleic.minbleicresults(blcstate, ref pcr, blcrep, _params); alglib.ap.assert(blcrep.terminationtype>0, "FitSphereX: unexpected failure of BLEIC solver"); rep.iterationscount = rep.iterationscount+blcrep.iterationscount; // // Terminate iterations early if we converged // v = 0; for(j=0; j<=nx-1; j++) { v = v+math.sqr(prevc[j]-pcr[j]); } v = Math.Sqrt(v); if( (double)(v)<=(double)(epsx) ) { break; } } // // Offload center coordinates from PCR to CX, // re-calculate exact value of RLo/RHi using CX. // for(j=0; j<=nx-1; j++) { cx[j] = pcr[j]; } rlo = math.maxrealnumber; rhi = 0; for(i=0; i<=npoints-1; i++) { v = 0; for(j=0; j<=nx-1; j++) { v = v+math.sqr(xy[i,j]-cx[j]); } v = Math.Sqrt(v); rhi = Math.Max(rhi, v); rlo = Math.Min(rlo, v); } if( !userlo ) { rlo = 0; } if( !userhi ) { rhi = 0; } return; } // // Oooops...! // alglib.ap.assert(false, "FitSphereX: integrity check failed"); } } } public class intfitserv { /************************************************************************* Internal subroutine: automatic scaling for LLS tasks. NEVER CALL IT DIRECTLY! Maps abscissas to [-1,1], standartizes ordinates and correspondingly scales constraints. 
It also scales weights so that max(W[i])=1 Transformations performed: * X, XC [XA,XB] => [-1,+1] transformation makes min(X)=-1, max(X)=+1 * Y [SA,SB] => [0,1] transformation makes mean(Y)=0, stddev(Y)=1 * YC transformed accordingly to SA, SB, DC[I] -- ALGLIB PROJECT -- Copyright 08.09.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitscalexy(ref double[] x, ref double[] y, ref double[] w, int n, ref double[] xc, ref double[] yc, int[] dc, int k, ref double xa, ref double xb, ref double sa, ref double sb, ref double[] xoriginal, ref double[] yoriginal, alglib.xparams _params) { double xmin = 0; double xmax = 0; int i = 0; double mx = 0; int i_ = 0; xa = 0; xb = 0; sa = 0; sb = 0; xoriginal = new double[0]; yoriginal = new double[0]; alglib.ap.assert(n>=1, "LSFitScaleXY: incorrect N"); alglib.ap.assert(k>=0, "LSFitScaleXY: incorrect K"); xmin = x[0]; xmax = x[0]; for(i=1; i<=n-1; i++) { xmin = Math.Min(xmin, x[i]); xmax = Math.Max(xmax, x[i]); } for(i=0; i<=k-1; i++) { xmin = Math.Min(xmin, xc[i]); xmax = Math.Max(xmax, xc[i]); } if( (double)(xmin)==(double)(xmax) ) { if( (double)(xmin)==(double)(0) ) { xmin = -1; xmax = 1; } else { if( (double)(xmin)>(double)(0) ) { xmin = 0.5*xmin; } else { xmax = 0.5*xmax; } } } xoriginal = new double[n]; for(i_=0; i_<=n-1;i_++) { xoriginal[i_] = x[i_]; } xa = xmin; xb = xmax; for(i=0; i<=n-1; i++) { x[i] = 2*(x[i]-0.5*(xa+xb))/(xb-xa); } for(i=0; i<=k-1; i++) { alglib.ap.assert(dc[i]>=0, "LSFitScaleXY: internal error!"); xc[i] = 2*(xc[i]-0.5*(xa+xb))/(xb-xa); yc[i] = yc[i]*Math.Pow(0.5*(xb-xa), dc[i]); } yoriginal = new double[n]; for(i_=0; i_<=n-1;i_++) { yoriginal[i_] = y[i_]; } sa = 0; for(i=0; i<=n-1; i++) { sa = sa+y[i]; } sa = sa/n; sb = 0; for(i=0; i<=n-1; i++) { sb = sb+math.sqr(y[i]-sa); } sb = Math.Sqrt(sb/n)+sa; if( (double)(sb)==(double)(sa) ) { sb = 2*sa; } if( (double)(sb)==(double)(sa) ) { sb = sa+1; } for(i=0; i<=n-1; i++) { y[i] = (y[i]-sa)/(sb-sa); } for(i=0; i<=k-1; i++) { if( dc[i]==0 ) { yc[i] = (yc[i]-sa)/(sb-sa); } else { yc[i] = yc[i]/(sb-sa); } } mx = 0; for(i=0; i<=n-1; i++) { mx = Math.Max(mx, Math.Abs(w[i])); } if( (double)(mx)!=(double)(0) ) { for(i=0; i<=n-1; i++) { w[i] = w[i]/mx; } } } public static void buildpriorterm(double[,] xy, int n, int nx, int ny, int modeltype, double priorval, ref double[,] v, alglib.xparams _params) { int i = 0; int j = 0; int j0 = 0; int j1 = 0; double rj = 0; double[,] araw = new double[0,0]; double[,] amod = new double[0,0]; double[,] braw = new double[0,0]; double[] tmp0 = new double[0]; double lambdareg = 0; int rfsits = 0; v = new double[0,0]; alglib.ap.assert(n>=0, "BuildPriorTerm: N<0"); alglib.ap.assert(nx>0, "BuildPriorTerm: NX<=0"); alglib.ap.assert(ny>0, "BuildPriorTerm: NY<=0"); v = new double[ny, nx+1]; for(i=0; i<=alglib.ap.rows(v)-1; i++) { for(j=0; j<=alglib.ap.cols(v)-1; j++) { v[i,j] = 0; } } if( n==0 ) { if( modeltype==0 ) { for(i=0; i<=ny-1; i++) { v[i,nx] = priorval; } return; } if( modeltype==1 ) { return; } if( modeltype==2 ) { return; } if( modeltype==3 ) { return; } alglib.ap.assert(false, "BuildPriorTerm: unexpected model type"); } if( modeltype==0 ) { for(i=0; i<=ny-1; i++) { v[i,nx] = priorval; } for(i=0; i<=n-1; i++) { for(j=0; j<=ny-1; j++) { xy[i,nx+j] = xy[i,nx+j]-priorval; } } return; } if( modeltype==2 ) { for(i=0; i<=n-1; i++) { for(j=0; j<=ny-1; j++) { v[j,nx] = v[j,nx]+xy[i,nx+j]; } } for(j=0; j<=ny-1; j++) { v[j,nx] = v[j,nx]/apserv.coalesce(n, 1, _params); } for(i=0; i<=n-1; i++) { 
for(j=0; j<=ny-1; j++) { xy[i,nx+j] = xy[i,nx+j]-v[j,nx]; } } return; } if( modeltype==3 ) { return; } alglib.ap.assert(modeltype==1, "BuildPriorTerm: unexpected model type"); lambdareg = 0.0; araw = new double[nx+1, nx+1]; braw = new double[nx+1, ny]; tmp0 = new double[nx+1]; amod = new double[nx+1, nx+1]; for(i=0; i<=nx; i++) { for(j=0; j<=nx; j++) { araw[i,j] = 0; } } for(i=0; i<=n-1; i++) { for(j=0; j<=nx-1; j++) { tmp0[j] = xy[i,j]; } tmp0[nx] = 1.0; for(j0=0; j0<=nx; j0++) { for(j1=0; j1<=nx; j1++) { araw[j0,j1] = araw[j0,j1]+tmp0[j0]*tmp0[j1]; } } } for(rfsits=1; rfsits<=3; rfsits++) { for(i=0; i<=nx; i++) { for(j=0; j<=ny-1; j++) { braw[i,j] = 0; } } for(i=0; i<=n-1; i++) { for(j=0; j<=nx-1; j++) { tmp0[j] = xy[i,j]; } tmp0[nx] = 1.0; for(j=0; j<=ny-1; j++) { rj = xy[i,nx+j]; for(j0=0; j0<=nx; j0++) { rj = rj-tmp0[j0]*v[j,j0]; } for(j0=0; j0<=nx; j0++) { braw[j0,j] = braw[j0,j]+rj*tmp0[j0]; } } } while( true ) { for(i=0; i<=nx; i++) { for(j=0; j<=nx; j++) { amod[i,j] = araw[i,j]; } amod[i,i] = amod[i,i]+lambdareg*apserv.coalesce(amod[i,i], 1, _params); } if( trfac.spdmatrixcholesky(ref amod, nx+1, true, _params) ) { break; } lambdareg = apserv.coalesce(10*lambdareg, 1.0E-12, _params); } ablas.rmatrixlefttrsm(nx+1, ny, amod, 0, 0, true, false, 1, braw, 0, 0, _params); ablas.rmatrixlefttrsm(nx+1, ny, amod, 0, 0, true, false, 0, braw, 0, 0, _params); for(i=0; i<=nx; i++) { for(j=0; j<=ny-1; j++) { v[j,i] = v[j,i]+braw[i,j]; } } } for(i=0; i<=n-1; i++) { for(j=0; j<=nx-1; j++) { tmp0[j] = xy[i,j]; } tmp0[nx] = 1.0; for(j=0; j<=ny-1; j++) { rj = 0.0; for(j0=0; j0<=nx; j0++) { rj = rj+tmp0[j0]*v[j,j0]; } xy[i,nx+j] = xy[i,nx+j]-rj; } } } public static void buildpriorterm1(double[] xy1, int n, int nx, int ny, int modeltype, double priorval, ref double[,] v, alglib.xparams _params) { int i = 0; int j = 0; int j0 = 0; int j1 = 0; int ew = 0; double rj = 0; double[,] araw = new double[0,0]; double[,] amod = new double[0,0]; double[,] braw = new double[0,0]; double[] tmp0 = new double[0]; double lambdareg = 0; int rfsits = 0; v = new double[0,0]; alglib.ap.assert(n>=0, "BuildPriorTerm: N<0"); alglib.ap.assert(nx>0, "BuildPriorTerm: NX<=0"); alglib.ap.assert(ny>0, "BuildPriorTerm: NY<=0"); ew = nx+ny; v = new double[ny, nx+1]; for(i=0; i<=alglib.ap.rows(v)-1; i++) { for(j=0; j<=alglib.ap.cols(v)-1; j++) { v[i,j] = 0; } } if( n==0 ) { if( modeltype==0 ) { for(i=0; i<=ny-1; i++) { v[i,nx] = priorval; } return; } if( modeltype==1 ) { return; } if( modeltype==2 ) { return; } if( modeltype==3 ) { return; } alglib.ap.assert(false, "BuildPriorTerm: unexpected model type"); } if( modeltype==0 ) { for(i=0; i<=ny-1; i++) { v[i,nx] = priorval; } for(i=0; i<=n-1; i++) { for(j=0; j<=ny-1; j++) { xy1[i*ew+nx+j] = xy1[i*ew+nx+j]-priorval; } } return; } if( modeltype==2 ) { for(i=0; i<=n-1; i++) { for(j=0; j<=ny-1; j++) { v[j,nx] = v[j,nx]+xy1[i*ew+nx+j]; } } for(j=0; j<=ny-1; j++) { v[j,nx] = v[j,nx]/apserv.coalesce(n, 1, _params); } for(i=0; i<=n-1; i++) { for(j=0; j<=ny-1; j++) { xy1[i*ew+nx+j] = xy1[i*ew+nx+j]-v[j,nx]; } } return; } if( modeltype==3 ) { return; } alglib.ap.assert(modeltype==1, "BuildPriorTerm: unexpected model type"); lambdareg = 0.0; araw = new double[nx+1, nx+1]; braw = new double[nx+1, ny]; tmp0 = new double[nx+1]; amod = new double[nx+1, nx+1]; for(i=0; i<=nx; i++) { for(j=0; j<=nx; j++) { araw[i,j] = 0; } } for(i=0; i<=n-1; i++) { for(j=0; j<=nx-1; j++) { tmp0[j] = xy1[i*ew+j]; } tmp0[nx] = 1.0; for(j0=0; j0<=nx; j0++) { for(j1=0; j1<=nx; j1++) { araw[j0,j1] = 
araw[j0,j1]+tmp0[j0]*tmp0[j1]; } } } for(rfsits=1; rfsits<=3; rfsits++) { for(i=0; i<=nx; i++) { for(j=0; j<=ny-1; j++) { braw[i,j] = 0; } } for(i=0; i<=n-1; i++) { for(j=0; j<=nx-1; j++) { tmp0[j] = xy1[i*ew+j]; } tmp0[nx] = 1.0; for(j=0; j<=ny-1; j++) { rj = xy1[i*ew+nx+j]; for(j0=0; j0<=nx; j0++) { rj = rj-tmp0[j0]*v[j,j0]; } for(j0=0; j0<=nx; j0++) { braw[j0,j] = braw[j0,j]+rj*tmp0[j0]; } } } while( true ) { for(i=0; i<=nx; i++) { for(j=0; j<=nx; j++) { amod[i,j] = araw[i,j]; } amod[i,i] = amod[i,i]+lambdareg*apserv.coalesce(amod[i,i], 1, _params); } if( trfac.spdmatrixcholesky(ref amod, nx+1, true, _params) ) { break; } lambdareg = apserv.coalesce(10*lambdareg, 1.0E-12, _params); } ablas.rmatrixlefttrsm(nx+1, ny, amod, 0, 0, true, false, 1, braw, 0, 0, _params); ablas.rmatrixlefttrsm(nx+1, ny, amod, 0, 0, true, false, 0, braw, 0, 0, _params); for(i=0; i<=nx; i++) { for(j=0; j<=ny-1; j++) { v[j,i] = v[j,i]+braw[i,j]; } } } for(i=0; i<=n-1; i++) { for(j=0; j<=nx-1; j++) { tmp0[j] = xy1[i*ew+j]; } tmp0[nx] = 1.0; for(j=0; j<=ny-1; j++) { rj = 0.0; for(j0=0; j0<=nx; j0++) { rj = rj+tmp0[j0]*v[j,j0]; } xy1[i*ew+nx+j] = xy1[i*ew+nx+j]-rj; } } } } public class spline1d { /************************************************************************* 1-dimensional spline interpolant *************************************************************************/ public class spline1dinterpolant : apobject { public bool periodic; public int n; public int k; public int continuity; public double[] x; public double[] c; public spline1dinterpolant() { init(); } public override void init() { x = new double[0]; c = new double[0]; } public override alglib.apobject make_copy() { spline1dinterpolant _result = new spline1dinterpolant(); _result.periodic = periodic; _result.n = n; _result.k = k; _result.continuity = continuity; _result.x = (double[])x.Clone(); _result.c = (double[])c.Clone(); return _result; } }; /************************************************************************* Spline fitting report: RMSError RMS error AvgError average error AvgRelError average relative error (for non-zero Y[I]) MaxError maximum error Fields below are filled by obsolete functions (Spline1DFitCubic, Spline1DFitHermite). Modern fitting functions do NOT fill these fields: TaskRCond reciprocal of task's condition number *************************************************************************/ public class spline1dfitreport : apobject { public double taskrcond; public double rmserror; public double avgerror; public double avgrelerror; public double maxerror; public spline1dfitreport() { init(); } public override void init() { } public override alglib.apobject make_copy() { spline1dfitreport _result = new spline1dfitreport(); _result.taskrcond = taskrcond; _result.rmserror = rmserror; _result.avgerror = avgerror; _result.avgrelerror = avgrelerror; _result.maxerror = maxerror; return _result; } }; /************************************************************************* This subroutine builds linear spline interpolant INPUT PARAMETERS: X - spline nodes, array[0..N-1] Y - function values, array[0..N-1] N - points count (optional): * N>=2 * if given, only first N points are used to build spline * if not given, automatically detected from X/Y sizes (len(X) must be equal to len(Y)) OUTPUT PARAMETERS: C - spline interpolant ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array. 
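USAGE SKETCH (an illustration, not part of the formal interface description; the data
values are made up, the internal routines declared below are called directly, and the
trailing xparams argument is passed as null purely to keep the sketch short - that null
is an assumption, real code should supply whatever xparams value it uses elsewhere):

    double[] x = new double[]{ 0.0, 1.0, 2.0 };
    double[] y = new double[]{ 0.0, 1.0, 4.0 };
    alglib.spline1d.spline1dinterpolant c = new alglib.spline1d.spline1dinterpolant();
    alglib.spline1d.spline1dbuildlinear(x, y, 3, c, null);   // null xparams: assumption, see note above
    double v = alglib.spline1d.spline1dcalc(c, 1.5, null);   // piecewise-linear value, here 2.5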
-- ALGLIB PROJECT -- Copyright 24.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline1dbuildlinear(double[] x, double[] y, int n, spline1dinterpolant c, alglib.xparams _params) { int i = 0; x = (double[])x.Clone(); y = (double[])y.Clone(); alglib.ap.assert(n>1, "Spline1DBuildLinear: N<2!"); alglib.ap.assert(alglib.ap.len(x)>=n, "Spline1DBuildLinear: Length(X)=n, "Spline1DBuildLinear: Length(Y)=2 * if given, only first N points are used to build spline * if not given, automatically detected from X/Y sizes (len(X) must be equal to len(Y)) BoundLType - boundary condition type for the left boundary BoundL - left boundary condition (first or second derivative, depending on the BoundLType) BoundRType - boundary condition type for the right boundary BoundR - right boundary condition (first or second derivative, depending on the BoundRType) OUTPUT PARAMETERS: C - spline interpolant ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array. SETTING BOUNDARY VALUES: The BoundLType/BoundRType parameters can have the following values: * -1, which corresonds to the periodic (cyclic) boundary conditions. In this case: * both BoundLType and BoundRType must be equal to -1. * BoundL/BoundR are ignored * Y[last] is ignored (it is assumed to be equal to Y[first]). * 0, which corresponds to the parabolically terminated spline (BoundL and/or BoundR are ignored). * 1, which corresponds to the first derivative boundary condition * 2, which corresponds to the second derivative boundary condition * by default, BoundType=0 is used PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. However, this subroutine doesn't require you to specify equal values for the first and last points - it automatically forces them to be equal by copying Y[first_point] (corresponds to the leftmost, minimal X[]) to Y[last_point]. However it is recommended to pass consistent values of Y[], i.e. to make Y[first_point]=Y[last_point]. 
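USAGE SKETCH (same assumptions as the sketch for Spline1DBuildLinear above; here the
spline is given "natural" boundary conditions, i.e. zero second derivative at both
ends, which corresponds to BoundLType=BoundRType=2 and BoundL=BoundR=0):

    double[] x = new double[]{ 0.0, 1.0, 2.0, 3.0 };
    double[] y = new double[]{ 0.0, 1.0, 8.0, 27.0 };
    alglib.spline1d.spline1dinterpolant c = new alglib.spline1d.spline1dinterpolant();
    alglib.spline1d.spline1dbuildcubic(x, y, 4, 2, 0.0, 2, 0.0, c, null);
    double v = alglib.spline1d.spline1dcalc(c, 1.5, null);   // smooth value between y[1]=1 and y[2]=8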
-- ALGLIB PROJECT -- Copyright 23.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline1dbuildcubic(double[] x, double[] y, int n, int boundltype, double boundl, int boundrtype, double boundr, spline1dinterpolant c, alglib.xparams _params) { double[] a1 = new double[0]; double[] a2 = new double[0]; double[] a3 = new double[0]; double[] b = new double[0]; double[] dt = new double[0]; double[] d = new double[0]; int[] p = new int[0]; int ylen = 0; x = (double[])x.Clone(); y = (double[])y.Clone(); // // check correctness of boundary conditions // alglib.ap.assert(((boundltype==-1 || boundltype==0) || boundltype==1) || boundltype==2, "Spline1DBuildCubic: incorrect BoundLType!"); alglib.ap.assert(((boundrtype==-1 || boundrtype==0) || boundrtype==1) || boundrtype==2, "Spline1DBuildCubic: incorrect BoundRType!"); alglib.ap.assert((boundrtype==-1 && boundltype==-1) || (boundrtype!=-1 && boundltype!=-1), "Spline1DBuildCubic: incorrect BoundLType/BoundRType!"); if( boundltype==1 || boundltype==2 ) { alglib.ap.assert(math.isfinite(boundl), "Spline1DBuildCubic: BoundL is infinite or NAN!"); } if( boundrtype==1 || boundrtype==2 ) { alglib.ap.assert(math.isfinite(boundr), "Spline1DBuildCubic: BoundR is infinite or NAN!"); } // // check lengths of arguments // alglib.ap.assert(n>=2, "Spline1DBuildCubic: N<2!"); alglib.ap.assert(alglib.ap.len(x)>=n, "Spline1DBuildCubic: Length(X)=n, "Spline1DBuildCubic: Length(Y)=2 * if given, only first N points are used * if not given, automatically detected from X/Y sizes (len(X) must be equal to len(Y)) BoundLType - boundary condition type for the left boundary BoundL - left boundary condition (first or second derivative, depending on the BoundLType) BoundRType - boundary condition type for the right boundary BoundR - right boundary condition (first or second derivative, depending on the BoundRType) OUTPUT PARAMETERS: D - derivative values at X[] ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array. Derivative values are correctly reordered on return, so D[I] is always equal to S'(X[I]) independently of points order. SETTING BOUNDARY VALUES: The BoundLType/BoundRType parameters can have the following values: * -1, which corresonds to the periodic (cyclic) boundary conditions. In this case: * both BoundLType and BoundRType must be equal to -1. * BoundL/BoundR are ignored * Y[last] is ignored (it is assumed to be equal to Y[first]). * 0, which corresponds to the parabolically terminated spline (BoundL and/or BoundR are ignored). * 1, which corresponds to the first derivative boundary condition * 2, which corresponds to the second derivative boundary condition * by default, BoundType=0 is used PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. However, this subroutine doesn't require you to specify equal values for the first and last points - it automatically forces them to be equal by copying Y[first_point] (corresponds to the leftmost, minimal X[]) to Y[last_point]. However it is recommended to pass consistent values of Y[], i.e. to make Y[first_point]=Y[last_point]. 
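USAGE SKETCH (same assumptions as the earlier sketches - hypothetical data, direct
internal calls, null xparams):

    double[] x = new double[]{ 0.0, 1.0, 2.0, 3.0 };
    double[] y = new double[]{ 0.0, 1.0, 4.0, 9.0 };   // x^2 sampled at the nodes
    double[] d = new double[0];
    alglib.spline1d.spline1dgriddiffcubic(x, y, 4, 2, 0.0, 2, 0.0, ref d, null);
    // d[i] now holds S'(x[i]) of the natural cubic spline built on (x,y)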
-- ALGLIB PROJECT -- Copyright 03.09.2010 by Bochkanov Sergey *************************************************************************/ public static void spline1dgriddiffcubic(double[] x, double[] y, int n, int boundltype, double boundl, int boundrtype, double boundr, ref double[] d, alglib.xparams _params) { double[] a1 = new double[0]; double[] a2 = new double[0]; double[] a3 = new double[0]; double[] b = new double[0]; double[] dt = new double[0]; int[] p = new int[0]; int i = 0; int ylen = 0; int i_ = 0; x = (double[])x.Clone(); y = (double[])y.Clone(); d = new double[0]; // // check correctness of boundary conditions // alglib.ap.assert(((boundltype==-1 || boundltype==0) || boundltype==1) || boundltype==2, "Spline1DGridDiffCubic: incorrect BoundLType!"); alglib.ap.assert(((boundrtype==-1 || boundrtype==0) || boundrtype==1) || boundrtype==2, "Spline1DGridDiffCubic: incorrect BoundRType!"); alglib.ap.assert((boundrtype==-1 && boundltype==-1) || (boundrtype!=-1 && boundltype!=-1), "Spline1DGridDiffCubic: incorrect BoundLType/BoundRType!"); if( boundltype==1 || boundltype==2 ) { alglib.ap.assert(math.isfinite(boundl), "Spline1DGridDiffCubic: BoundL is infinite or NAN!"); } if( boundrtype==1 || boundrtype==2 ) { alglib.ap.assert(math.isfinite(boundr), "Spline1DGridDiffCubic: BoundR is infinite or NAN!"); } // // check lengths of arguments // alglib.ap.assert(n>=2, "Spline1DGridDiffCubic: N<2!"); alglib.ap.assert(alglib.ap.len(x)>=n, "Spline1DGridDiffCubic: Length(X)=n, "Spline1DGridDiffCubic: Length(Y)=2 * if given, only first N points are used * if not given, automatically detected from X/Y sizes (len(X) must be equal to len(Y)) BoundLType - boundary condition type for the left boundary BoundL - left boundary condition (first or second derivative, depending on the BoundLType) BoundRType - boundary condition type for the right boundary BoundR - right boundary condition (first or second derivative, depending on the BoundRType) OUTPUT PARAMETERS: D1 - S' values at X[] D2 - S'' values at X[] ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array. Derivative values are correctly reordered on return, so D[I] is always equal to S'(X[I]) independently of points order. SETTING BOUNDARY VALUES: The BoundLType/BoundRType parameters can have the following values: * -1, which corresonds to the periodic (cyclic) boundary conditions. In this case: * both BoundLType and BoundRType must be equal to -1. * BoundL/BoundR are ignored * Y[last] is ignored (it is assumed to be equal to Y[first]). * 0, which corresponds to the parabolically terminated spline (BoundL and/or BoundR are ignored). * 1, which corresponds to the first derivative boundary condition * 2, which corresponds to the second derivative boundary condition * by default, BoundType=0 is used PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. However, this subroutine doesn't require you to specify equal values for the first and last points - it automatically forces them to be equal by copying Y[first_point] (corresponds to the leftmost, minimal X[]) to Y[last_point]. However it is recommended to pass consistent values of Y[], i.e. to make Y[first_point]=Y[last_point]. 
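USAGE SKETCH (same assumptions as the earlier sketches):

    double[] x  = new double[]{ 0.0, 1.0, 2.0, 3.0 };
    double[] y  = new double[]{ 1.0, 0.0, 1.0, 0.0 };
    double[] d1 = new double[0];
    double[] d2 = new double[0];
    alglib.spline1d.spline1dgriddiff2cubic(x, y, 4, 2, 0.0, 2, 0.0, ref d1, ref d2, null);
    // d1[i]=S'(x[i]) and d2[i]=S''(x[i]) at the (sorted) nodes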
-- ALGLIB PROJECT -- Copyright 03.09.2010 by Bochkanov Sergey *************************************************************************/ public static void spline1dgriddiff2cubic(double[] x, double[] y, int n, int boundltype, double boundl, int boundrtype, double boundr, ref double[] d1, ref double[] d2, alglib.xparams _params) { double[] a1 = new double[0]; double[] a2 = new double[0]; double[] a3 = new double[0]; double[] b = new double[0]; double[] dt = new double[0]; int[] p = new int[0]; int i = 0; int ylen = 0; double delta = 0; double delta2 = 0; double delta3 = 0; double s2 = 0; double s3 = 0; int i_ = 0; x = (double[])x.Clone(); y = (double[])y.Clone(); d1 = new double[0]; d2 = new double[0]; // // check correctness of boundary conditions // alglib.ap.assert(((boundltype==-1 || boundltype==0) || boundltype==1) || boundltype==2, "Spline1DGridDiff2Cubic: incorrect BoundLType!"); alglib.ap.assert(((boundrtype==-1 || boundrtype==0) || boundrtype==1) || boundrtype==2, "Spline1DGridDiff2Cubic: incorrect BoundRType!"); alglib.ap.assert((boundrtype==-1 && boundltype==-1) || (boundrtype!=-1 && boundltype!=-1), "Spline1DGridDiff2Cubic: incorrect BoundLType/BoundRType!"); if( boundltype==1 || boundltype==2 ) { alglib.ap.assert(math.isfinite(boundl), "Spline1DGridDiff2Cubic: BoundL is infinite or NAN!"); } if( boundrtype==1 || boundrtype==2 ) { alglib.ap.assert(math.isfinite(boundr), "Spline1DGridDiff2Cubic: BoundR is infinite or NAN!"); } // // check lengths of arguments // alglib.ap.assert(n>=2, "Spline1DGridDiff2Cubic: N<2!"); alglib.ap.assert(alglib.ap.len(x)>=n, "Spline1DGridDiff2Cubic: Length(X)=n, "Spline1DGridDiff2Cubic: Length(Y)=2 * if given, only first N points from X/Y are used * if not given, automatically detected from X/Y sizes (len(X) must be equal to len(Y)) BoundLType - boundary condition type for the left boundary BoundL - left boundary condition (first or second derivative, depending on the BoundLType) BoundRType - boundary condition type for the right boundary BoundR - right boundary condition (first or second derivative, depending on the BoundRType) N2 - new points count: * N2>=2 * if given, only first N2 points from X2 are used * if not given, automatically detected from X2 size OUTPUT PARAMETERS: F2 - function values at X2[] ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array. Function values are correctly reordered on return, so F2[I] is always equal to S(X2[I]) independently of points order. SETTING BOUNDARY VALUES: The BoundLType/BoundRType parameters can have the following values: * -1, which corresonds to the periodic (cyclic) boundary conditions. In this case: * both BoundLType and BoundRType must be equal to -1. * BoundL/BoundR are ignored * Y[last] is ignored (it is assumed to be equal to Y[first]). * 0, which corresponds to the parabolically terminated spline (BoundL and/or BoundR are ignored). * 1, which corresponds to the first derivative boundary condition * 2, which corresponds to the second derivative boundary condition * by default, BoundType=0 is used PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. However, this subroutine doesn't require you to specify equal values for the first and last points - it automatically forces them to be equal by copying Y[first_point] (corresponds to the leftmost, minimal X[]) to Y[last_point]. However it is recommended to pass consistent values of Y[], i.e. to make Y[first_point]=Y[last_point]. 
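USAGE SKETCH (same assumptions as the earlier sketches) - resampling tabulated values
from a coarse grid onto a finer grid in one call:

    double[] x  = new double[]{ 0.0, 1.0, 2.0 };
    double[] y  = new double[]{ 0.0, 1.0, 0.0 };
    double[] x2 = new double[]{ 0.0, 0.5, 1.0, 1.5, 2.0 };
    double[] y2 = new double[0];
    alglib.spline1d.spline1dconvcubic(x, y, 3, 2, 0.0, 2, 0.0, x2, 5, ref y2, null);
    // y2[i]=S(x2[i]), where S is the natural cubic spline built on (x,y)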
-- ALGLIB PROJECT -- Copyright 03.09.2010 by Bochkanov Sergey *************************************************************************/ public static void spline1dconvcubic(double[] x, double[] y, int n, int boundltype, double boundl, int boundrtype, double boundr, double[] x2, int n2, ref double[] y2, alglib.xparams _params) { double[] a1 = new double[0]; double[] a2 = new double[0]; double[] a3 = new double[0]; double[] b = new double[0]; double[] d = new double[0]; double[] dt = new double[0]; double[] d1 = new double[0]; double[] d2 = new double[0]; int[] p = new int[0]; int[] p2 = new int[0]; int i = 0; int ylen = 0; double t = 0; double t2 = 0; int i_ = 0; x = (double[])x.Clone(); y = (double[])y.Clone(); x2 = (double[])x2.Clone(); y2 = new double[0]; // // check correctness of boundary conditions // alglib.ap.assert(((boundltype==-1 || boundltype==0) || boundltype==1) || boundltype==2, "Spline1DConvCubic: incorrect BoundLType!"); alglib.ap.assert(((boundrtype==-1 || boundrtype==0) || boundrtype==1) || boundrtype==2, "Spline1DConvCubic: incorrect BoundRType!"); alglib.ap.assert((boundrtype==-1 && boundltype==-1) || (boundrtype!=-1 && boundltype!=-1), "Spline1DConvCubic: incorrect BoundLType/BoundRType!"); if( boundltype==1 || boundltype==2 ) { alglib.ap.assert(math.isfinite(boundl), "Spline1DConvCubic: BoundL is infinite or NAN!"); } if( boundrtype==1 || boundrtype==2 ) { alglib.ap.assert(math.isfinite(boundr), "Spline1DConvCubic: BoundR is infinite or NAN!"); } // // check lengths of arguments // alglib.ap.assert(n>=2, "Spline1DConvCubic: N<2!"); alglib.ap.assert(alglib.ap.len(x)>=n, "Spline1DConvCubic: Length(X)=n, "Spline1DConvCubic: Length(Y)=2, "Spline1DConvCubic: N2<2!"); alglib.ap.assert(alglib.ap.len(x2)>=n2, "Spline1DConvCubic: Length(X2)=n2, "Spline1DConvCubic: internal error!"); for(i=0; i<=n2-1; i++) { dt[p2[i]] = y2[i]; } for(i_=0; i_<=n2-1;i_++) { y2[i_] = dt[i_]; } } /************************************************************************* This function solves following problem: given table y[] of function values at old nodes x[] and new nodes x2[], it calculates and returns table of function values y2[] and derivatives d2[] (calculated at x2[]). This function yields same result as Spline1DBuildCubic() call followed by sequence of Spline1DDiff() calls, but it can be several times faster when called for ordered X[] and X2[]. INPUT PARAMETERS: X - old spline nodes Y - function values X2 - new spline nodes OPTIONAL PARAMETERS: N - points count: * N>=2 * if given, only first N points from X/Y are used * if not given, automatically detected from X/Y sizes (len(X) must be equal to len(Y)) BoundLType - boundary condition type for the left boundary BoundL - left boundary condition (first or second derivative, depending on the BoundLType) BoundRType - boundary condition type for the right boundary BoundR - right boundary condition (first or second derivative, depending on the BoundRType) N2 - new points count: * N2>=2 * if given, only first N2 points from X2 are used * if not given, automatically detected from X2 size OUTPUT PARAMETERS: F2 - function values at X2[] D2 - first derivatives at X2[] ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array. Function values are correctly reordered on return, so F2[I] is always equal to S(X2[I]) independently of points order. SETTING BOUNDARY VALUES: The BoundLType/BoundRType parameters can have the following values: * -1, which corresonds to the periodic (cyclic) boundary conditions. 
In this case: * both BoundLType and BoundRType must be equal to -1. * BoundL/BoundR are ignored * Y[last] is ignored (it is assumed to be equal to Y[first]). * 0, which corresponds to the parabolically terminated spline (BoundL and/or BoundR are ignored). * 1, which corresponds to the first derivative boundary condition * 2, which corresponds to the second derivative boundary condition * by default, BoundType=0 is used PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. However, this subroutine doesn't require you to specify equal values for the first and last points - it automatically forces them to be equal by copying Y[first_point] (corresponds to the leftmost, minimal X[]) to Y[last_point]. However it is recommended to pass consistent values of Y[], i.e. to make Y[first_point]=Y[last_point]. -- ALGLIB PROJECT -- Copyright 03.09.2010 by Bochkanov Sergey *************************************************************************/ public static void spline1dconvdiffcubic(double[] x, double[] y, int n, int boundltype, double boundl, int boundrtype, double boundr, double[] x2, int n2, ref double[] y2, ref double[] d2, alglib.xparams _params) { double[] a1 = new double[0]; double[] a2 = new double[0]; double[] a3 = new double[0]; double[] b = new double[0]; double[] d = new double[0]; double[] dt = new double[0]; double[] rt1 = new double[0]; int[] p = new int[0]; int[] p2 = new int[0]; int i = 0; int ylen = 0; double t = 0; double t2 = 0; int i_ = 0; x = (double[])x.Clone(); y = (double[])y.Clone(); x2 = (double[])x2.Clone(); y2 = new double[0]; d2 = new double[0]; // // check correctness of boundary conditions // alglib.ap.assert(((boundltype==-1 || boundltype==0) || boundltype==1) || boundltype==2, "Spline1DConvDiffCubic: incorrect BoundLType!"); alglib.ap.assert(((boundrtype==-1 || boundrtype==0) || boundrtype==1) || boundrtype==2, "Spline1DConvDiffCubic: incorrect BoundRType!"); alglib.ap.assert((boundrtype==-1 && boundltype==-1) || (boundrtype!=-1 && boundltype!=-1), "Spline1DConvDiffCubic: incorrect BoundLType/BoundRType!"); if( boundltype==1 || boundltype==2 ) { alglib.ap.assert(math.isfinite(boundl), "Spline1DConvDiffCubic: BoundL is infinite or NAN!"); } if( boundrtype==1 || boundrtype==2 ) { alglib.ap.assert(math.isfinite(boundr), "Spline1DConvDiffCubic: BoundR is infinite or NAN!"); } // // check lengths of arguments // alglib.ap.assert(n>=2, "Spline1DConvDiffCubic: N<2!"); alglib.ap.assert(alglib.ap.len(x)>=n, "Spline1DConvDiffCubic: Length(X)=n, "Spline1DConvDiffCubic: Length(Y)=2, "Spline1DConvDiffCubic: N2<2!"); alglib.ap.assert(alglib.ap.len(x2)>=n2, "Spline1DConvDiffCubic: Length(X2)=n2, "Spline1DConvDiffCubic: internal error!"); for(i=0; i<=n2-1; i++) { dt[p2[i]] = y2[i]; } for(i_=0; i_<=n2-1;i_++) { y2[i_] = dt[i_]; } for(i=0; i<=n2-1; i++) { dt[p2[i]] = d2[i]; } for(i_=0; i_<=n2-1;i_++) { d2[i_] = dt[i_]; } } /************************************************************************* This function solves following problem: given table y[] of function values at old nodes x[] and new nodes x2[], it calculates and returns table of function values y2[], first and second derivatives d2[] and dd2[] (calculated at x2[]). This function yields same result as Spline1DBuildCubic() call followed by sequence of Spline1DDiff() calls, but it can be several times faster when called for ordered X[] and X2[]. 
INPUT PARAMETERS:
    X           -   old spline nodes
    Y           -   function values
    X2          -   new spline nodes

OPTIONAL PARAMETERS:
    N           -   points count:
                    * N>=2
                    * if given, only first N points from X/Y are used
                    * if not given, automatically detected from X/Y sizes
                      (len(X) must be equal to len(Y))
    BoundLType  -   boundary condition type for the left boundary
    BoundL      -   left boundary condition (first or second derivative,
                    depending on the BoundLType)
    BoundRType  -   boundary condition type for the right boundary
    BoundR      -   right boundary condition (first or second derivative,
                    depending on the BoundRType)
    N2          -   new points count:
                    * N2>=2
                    * if given, only first N2 points from X2 are used
                    * if not given, automatically detected from X2 size

OUTPUT PARAMETERS:
    F2          -   function values at X2[]
    D2          -   first derivatives at X2[]
    DD2         -   second derivatives at X2[]

ORDER OF POINTS

Subroutine automatically sorts points, so caller may pass unsorted array.
Function values are correctly reordered on return, so F2[I] is always
equal to S(X2[I]) independently of points order.

SETTING BOUNDARY VALUES:

The BoundLType/BoundRType parameters can have the following values:
    * -1, which corresponds to the periodic (cyclic) boundary conditions.
          In this case:
          * both BoundLType and BoundRType must be equal to -1.
          * BoundL/BoundR are ignored
          * Y[last] is ignored (it is assumed to be equal to Y[first]).
    *  0, which corresponds to the parabolically terminated spline
          (BoundL and/or BoundR are ignored).
    *  1, which corresponds to the first derivative boundary condition
    *  2, which corresponds to the second derivative boundary condition
    *  by default, BoundType=0 is used

PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS:

Problems with periodic boundary conditions have Y[first_point]=Y[last_point].
However, this subroutine doesn't require you to specify equal values for
the first and last points - it automatically forces them to be equal by
copying Y[first_point] (which corresponds to the leftmost, minimal X[]) to
Y[last_point]. However, it is recommended to pass consistent values of Y[],
i.e. to make Y[first_point]=Y[last_point].
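USAGE SKETCH (same assumptions as the earlier sketches):

    double[] x   = new double[]{ 0.0, 1.0, 2.0 };
    double[] y   = new double[]{ 0.0, 1.0, 0.0 };
    double[] x2  = new double[]{ 0.25, 0.75, 1.25, 1.75 };
    double[] y2  = new double[0];
    double[] d2  = new double[0];
    double[] dd2 = new double[0];
    alglib.spline1d.spline1dconvdiff2cubic(x, y, 3, 2, 0.0, 2, 0.0, x2, 4, ref y2, ref d2, ref dd2, null);
    // y2[i]=S(x2[i]), d2[i]=S'(x2[i]), dd2[i]=S''(x2[i])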
-- ALGLIB PROJECT -- Copyright 03.09.2010 by Bochkanov Sergey *************************************************************************/ public static void spline1dconvdiff2cubic(double[] x, double[] y, int n, int boundltype, double boundl, int boundrtype, double boundr, double[] x2, int n2, ref double[] y2, ref double[] d2, ref double[] dd2, alglib.xparams _params) { double[] a1 = new double[0]; double[] a2 = new double[0]; double[] a3 = new double[0]; double[] b = new double[0]; double[] d = new double[0]; double[] dt = new double[0]; int[] p = new int[0]; int[] p2 = new int[0]; int i = 0; int ylen = 0; double t = 0; double t2 = 0; int i_ = 0; x = (double[])x.Clone(); y = (double[])y.Clone(); x2 = (double[])x2.Clone(); y2 = new double[0]; d2 = new double[0]; dd2 = new double[0]; // // check correctness of boundary conditions // alglib.ap.assert(((boundltype==-1 || boundltype==0) || boundltype==1) || boundltype==2, "Spline1DConvDiff2Cubic: incorrect BoundLType!"); alglib.ap.assert(((boundrtype==-1 || boundrtype==0) || boundrtype==1) || boundrtype==2, "Spline1DConvDiff2Cubic: incorrect BoundRType!"); alglib.ap.assert((boundrtype==-1 && boundltype==-1) || (boundrtype!=-1 && boundltype!=-1), "Spline1DConvDiff2Cubic: incorrect BoundLType/BoundRType!"); if( boundltype==1 || boundltype==2 ) { alglib.ap.assert(math.isfinite(boundl), "Spline1DConvDiff2Cubic: BoundL is infinite or NAN!"); } if( boundrtype==1 || boundrtype==2 ) { alglib.ap.assert(math.isfinite(boundr), "Spline1DConvDiff2Cubic: BoundR is infinite or NAN!"); } // // check lengths of arguments // alglib.ap.assert(n>=2, "Spline1DConvDiff2Cubic: N<2!"); alglib.ap.assert(alglib.ap.len(x)>=n, "Spline1DConvDiff2Cubic: Length(X)=n, "Spline1DConvDiff2Cubic: Length(Y)=2, "Spline1DConvDiff2Cubic: N2<2!"); alglib.ap.assert(alglib.ap.len(x2)>=n2, "Spline1DConvDiff2Cubic: Length(X2)=n2, "Spline1DConvDiff2Cubic: internal error!"); for(i=0; i<=n2-1; i++) { dt[p2[i]] = y2[i]; } for(i_=0; i_<=n2-1;i_++) { y2[i_] = dt[i_]; } for(i=0; i<=n2-1; i++) { dt[p2[i]] = d2[i]; } for(i_=0; i_<=n2-1;i_++) { d2[i_] = dt[i_]; } for(i=0; i<=n2-1; i++) { dt[p2[i]] = dd2[i]; } for(i_=0; i_<=n2-1;i_++) { dd2[i_] = dt[i_]; } } /************************************************************************* This subroutine builds Catmull-Rom spline interpolant. INPUT PARAMETERS: X - spline nodes, array[0..N-1]. Y - function values, array[0..N-1]. OPTIONAL PARAMETERS: N - points count: * N>=2 * if given, only first N points are used to build spline * if not given, automatically detected from X/Y sizes (len(X) must be equal to len(Y)) BoundType - boundary condition type: * -1 for periodic boundary condition * 0 for parabolically terminated spline (default) Tension - tension parameter: * tension=0 corresponds to classic Catmull-Rom spline (default) * 0=2, "Spline1DBuildCatmullRom: N<2!"); alglib.ap.assert(boundtype==-1 || boundtype==0, "Spline1DBuildCatmullRom: incorrect BoundType!"); alglib.ap.assert((double)(tension)>=(double)(0), "Spline1DBuildCatmullRom: Tension<0!"); alglib.ap.assert((double)(tension)<=(double)(1), "Spline1DBuildCatmullRom: Tension>1!"); alglib.ap.assert(alglib.ap.len(x)>=n, "Spline1DBuildCatmullRom: Length(X)=n, "Spline1DBuildCatmullRom: Length(Y)=2 * if given, only first N points are used to build spline * if not given, automatically detected from X/Y sizes (len(X) must be equal to len(Y)) OUTPUT PARAMETERS: C - spline interpolant. ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array. 
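USAGE SKETCH (same assumptions as the earlier sketches; judging from the declaration
below, X holds the nodes, Y the values, D the prescribed first derivatives, N the
point count, and C receives the Hermite interpolant):

    double[] x = new double[]{ 0.0, 1.0 };
    double[] y = new double[]{ 0.0, 1.0 };
    double[] d = new double[]{ 0.0, 0.0 };   // prescribed S'(x[i])
    alglib.spline1d.spline1dinterpolant c = new alglib.spline1d.spline1dinterpolant();
    alglib.spline1d.spline1dbuildhermite(x, y, d, 2, c, null);
    // the single segment is the cubic 3*t^2 - 2*t^3, so S(0.5)=0.5
    double v = alglib.spline1d.spline1dcalc(c, 0.5, null);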
-- ALGLIB PROJECT -- Copyright 23.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline1dbuildhermite(double[] x, double[] y, double[] d, int n, spline1dinterpolant c, alglib.xparams _params) { int i = 0; double delta = 0; double delta2 = 0; double delta3 = 0; x = (double[])x.Clone(); y = (double[])y.Clone(); d = (double[])d.Clone(); alglib.ap.assert(n>=2, "Spline1DBuildHermite: N<2!"); alglib.ap.assert(alglib.ap.len(x)>=n, "Spline1DBuildHermite: Length(X)=n, "Spline1DBuildHermite: Length(Y)=n, "Spline1DBuildHermite: Length(D)=2 * if given, only first N points are used to build spline * if not given, automatically detected from X/Y sizes (len(X) must be equal to len(Y)) OUTPUT PARAMETERS: C - spline interpolant ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array. -- ALGLIB PROJECT -- Copyright 24.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline1dbuildakima(double[] x, double[] y, int n, spline1dinterpolant c, alglib.xparams _params) { int i = 0; double[] d = new double[0]; double[] w = new double[0]; double[] diff = new double[0]; x = (double[])x.Clone(); y = (double[])y.Clone(); alglib.ap.assert(n>=2, "Spline1DBuildAkima: N<2!"); alglib.ap.assert(alglib.ap.len(x)>=n, "Spline1DBuildAkima: Length(X)=n, "Spline1DBuildAkima: Length(Y)=x ) { r = m; } else { l = m; } } // // Interpolation // x = x-c.x[l]; m = 4*l; result = c.c[m]+x*(c.c[m+1]+x*(c.c[m+2]+x*c.c[m+3])); return result; } /************************************************************************* This subroutine differentiates the spline. INPUT PARAMETERS: C - spline interpolant. X - point Result: S - S(x) DS - S'(x) D2S - S''(x) -- ALGLIB PROJECT -- Copyright 24.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline1ddiff(spline1dinterpolant c, double x, ref double s, ref double ds, ref double d2s, alglib.xparams _params) { int l = 0; int r = 0; int m = 0; double t = 0; s = 0; ds = 0; d2s = 0; alglib.ap.assert(c.k==3, "Spline1DDiff: internal error"); alglib.ap.assert(!Double.IsInfinity(x), "Spline1DDiff: infinite X!"); // // special case: NaN // if( Double.IsNaN(x) ) { s = Double.NaN; ds = Double.NaN; d2s = Double.NaN; return; } // // correct if periodic // if( c.periodic ) { apserv.apperiodicmap(ref x, c.x[0], c.x[c.n-1], ref t, _params); } // // Binary search // l = 0; r = c.n-2+1; while( l!=r-1 ) { m = (l+r)/2; if( c.x[m]>=x ) { r = m; } else { l = m; } } // // Differentiation // x = x-c.x[l]; m = 4*l; s = c.c[m]+x*(c.c[m+1]+x*(c.c[m+2]+x*c.c[m+3])); ds = c.c[m+1]+2*x*c.c[m+2]+3*math.sqr(x)*c.c[m+3]; d2s = 2*c.c[m+2]+6*x*c.c[m+3]; } /************************************************************************* This subroutine makes the copy of the spline. INPUT PARAMETERS: C - spline interpolant. 
Result: CC - spline copy -- ALGLIB PROJECT -- Copyright 29.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline1dcopy(spline1dinterpolant c, spline1dinterpolant cc, alglib.xparams _params) { int s = 0; int i_ = 0; cc.periodic = c.periodic; cc.n = c.n; cc.k = c.k; cc.continuity = c.continuity; cc.x = new double[cc.n]; for(i_=0; i_<=cc.n-1;i_++) { cc.x[i_] = c.x[i_]; } s = alglib.ap.len(c.c); cc.c = new double[s]; for(i_=0; i_<=s-1;i_++) { cc.c[i_] = c.c[i_]; } } /************************************************************************* This subroutine unpacks the spline into the coefficients table. INPUT PARAMETERS: C - spline interpolant. X - point OUTPUT PARAMETERS: Tbl - coefficients table, unpacked format, array[0..N-2, 0..5]. For I = 0...N-2: Tbl[I,0] = X[i] Tbl[I,1] = X[i+1] Tbl[I,2] = C0 Tbl[I,3] = C1 Tbl[I,4] = C2 Tbl[I,5] = C3 On [x[i], x[i+1]] spline is equals to: S(x) = C0 + C1*t + C2*t^2 + C3*t^3 t = x-x[i] NOTE: You can rebuild spline with Spline1DBuildHermite() function, which accepts as inputs function values and derivatives at nodes, which are easy to calculate when you have coefficients. -- ALGLIB PROJECT -- Copyright 29.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline1dunpack(spline1dinterpolant c, ref int n, ref double[,] tbl, alglib.xparams _params) { int i = 0; int j = 0; n = 0; tbl = new double[0,0]; tbl = new double[c.n-2+1, 2+c.k+1]; n = c.n; // // Fill // for(i=0; i<=n-2; i++) { tbl[i,0] = c.x[i]; tbl[i,1] = c.x[i+1]; for(j=0; j<=c.k; j++) { tbl[i,2+j] = c.c[(c.k+1)*i+j]; } } } /************************************************************************* This subroutine performs linear transformation of the spline argument. INPUT PARAMETERS: C - spline interpolant. A, B- transformation coefficients: x = A*t + B Result: C - transformed spline -- ALGLIB PROJECT -- Copyright 30.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline1dlintransx(spline1dinterpolant c, double a, double b, alglib.xparams _params) { int i = 0; int n = 0; double v = 0; double dv = 0; double d2v = 0; double[] x = new double[0]; double[] y = new double[0]; double[] d = new double[0]; bool isperiodic = new bool(); int contval = 0; alglib.ap.assert(c.k==3, "Spline1DLinTransX: internal error"); n = c.n; x = new double[n]; y = new double[n]; d = new double[n]; // // Unpack, X, Y, dY/dX. // Scale and pack with Spline1DBuildHermite again. // if( (double)(a)==(double)(0) ) { // // Special case: A=0 // v = spline1dcalc(c, b, _params); for(i=0; i<=n-1; i++) { x[i] = c.x[i]; y[i] = v; d[i] = 0.0; } } else { // // General case, A<>0 // for(i=0; i<=n-1; i++) { x[i] = c.x[i]; spline1ddiff(c, x[i], ref v, ref dv, ref d2v, _params); x[i] = (x[i]-b)/a; y[i] = v; d[i] = a*dv; } } isperiodic = c.periodic; contval = c.continuity; if( contval>0 ) { spline1dbuildhermite(x, y, d, n, c, _params); } else { spline1dbuildlinear(x, y, n, c, _params); } c.periodic = isperiodic; c.continuity = contval; } /************************************************************************* This subroutine performs linear transformation of the spline. INPUT PARAMETERS: C - spline interpolant. 
A, B- transformation coefficients: S2(x) = A*S(x) + B Result: C - transformed spline -- ALGLIB PROJECT -- Copyright 30.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline1dlintransy(spline1dinterpolant c, double a, double b, alglib.xparams _params) { int i = 0; int j = 0; int n = 0; alglib.ap.assert(c.k==3, "Spline1DLinTransX: internal error"); n = c.n; for(i=0; i<=n-2; i++) { c.c[4*i] = a*c.c[4*i]+b; for(j=1; j<=3; j++) { c.c[4*i+j] = a*c.c[4*i+j]; } } c.c[4*(n-1)+0] = a*c.c[4*(n-1)+0]+b; c.c[4*(n-1)+1] = a*c.c[4*(n-1)+1]; } /************************************************************************* This subroutine integrates the spline. INPUT PARAMETERS: C - spline interpolant. X - right bound of the integration interval [a, x], here 'a' denotes min(x[]) Result: integral(S(t)dt,a,x) -- ALGLIB PROJECT -- Copyright 23.06.2007 by Bochkanov Sergey *************************************************************************/ public static double spline1dintegrate(spline1dinterpolant c, double x, alglib.xparams _params) { double result = 0; int n = 0; int i = 0; int j = 0; int l = 0; int r = 0; int m = 0; double w = 0; double v = 0; double t = 0; double intab = 0; double additionalterm = 0; n = c.n; // // Periodic splines require special treatment. We make // following transformation: // // integral(S(t)dt,A,X) = integral(S(t)dt,A,Z)+AdditionalTerm // // here X may lie outside of [A,B], Z lies strictly in [A,B], // AdditionalTerm is equals to integral(S(t)dt,A,B) times some // integer number (may be zero). // if( c.periodic && ((double)(x)<(double)(c.x[0]) || (double)(x)>(double)(c.x[c.n-1])) ) { // // compute integral(S(x)dx,A,B) // intab = 0; for(i=0; i<=c.n-2; i++) { w = c.x[i+1]-c.x[i]; m = (c.k+1)*i; intab = intab+c.c[m]*w; v = w; for(j=1; j<=c.k; j++) { v = v*w; intab = intab+c.c[m+j]*v/(j+1); } } // // map X into [A,B] // apserv.apperiodicmap(ref x, c.x[0], c.x[c.n-1], ref t, _params); additionalterm = t*intab; } else { additionalterm = 0; } // // Binary search in the [ x[0], ..., x[n-2] ] (x[n-1] is not included) // l = 0; r = n-2+1; while( l!=r-1 ) { m = (l+r)/2; if( (double)(c.x[m])>=(double)(x) ) { r = m; } else { l = m; } } // // Integration // result = 0; for(i=0; i<=l-1; i++) { w = c.x[i+1]-c.x[i]; m = (c.k+1)*i; result = result+c.c[m]*w; v = w; for(j=1; j<=c.k; j++) { v = v*w; result = result+c.c[m+j]*v/(j+1); } } w = x-c.x[l]; m = (c.k+1)*l; v = w; result = result+c.c[m]*w; for(j=1; j<=c.k; j++) { v = v*w; result = result+c.c[m+j]*v/(j+1); } result = result+additionalterm; return result; } /************************************************************************* Fitting by penalized cubic spline. Equidistant grid with M nodes on [min(x,xc),max(x,xc)] is used to build basis functions. Basis functions are cubic splines with natural boundary conditions. Problem is regularized by adding non-linearity penalty to the usual least squares penalty function: S(x) = arg min { LS + P }, where LS = SUM { w[i]^2*(y[i] - S(x[i]))^2 } - least squares penalty P = C*10^rho*integral{ S''(x)^2*dx } - non-linearity penalty rho - tunable constant given by user C - automatically determined scale parameter, makes penalty invariant with respect to scaling of X, Y, W. ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! 
* hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: X - points, array[0..N-1]. Y - function values, array[0..N-1]. N - number of points (optional): * N>0 * if given, only first N elements of X/Y are processed * if not given, automatically determined from X/Y sizes M - number of basis functions ( = number_of_nodes), M>=4. Rho - regularization constant passed by user. It penalizes nonlinearity in the regression spline. It is logarithmically scaled, i.e. actual value of regularization constant is calculated as 10^Rho. It is automatically scaled so that: * Rho=2.0 corresponds to moderate amount of nonlinearity * generally, it should be somewhere in the [-8.0,+8.0] If you do not want to penalize nonlineary, pass small Rho. Values as low as -15 should work. OUTPUT PARAMETERS: Info- same format as in LSFitLinearWC() subroutine. * Info>0 task is solved * Info<=0 an error occured: -4 means inconvergence of internal SVD or Cholesky decomposition; problem may be too ill-conditioned (very rare) S - spline interpolant. Rep - Following fields are set: * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED IMPORTANT: this subroitine doesn't calculate task's condition number for K<>0. NOTE 1: additional nodes are added to the spline outside of the fitting interval to force linearity when xmax(x,xc). It is done for consistency - we penalize non-linearity at [min(x,xc),max(x,xc)], so it is natural to force linearity outside of this interval. NOTE 2: function automatically sorts points, so caller may pass unsorted array. -- ALGLIB PROJECT -- Copyright 18.08.2009 by Bochkanov Sergey *************************************************************************/ public static void spline1dfitpenalized(double[] x, double[] y, int n, int m, double rho, ref int info, spline1dinterpolant s, spline1dfitreport rep, alglib.xparams _params) { double[] w = new double[0]; int i = 0; x = (double[])x.Clone(); y = (double[])y.Clone(); info = 0; alglib.ap.assert(n>=1, "Spline1DFitPenalized: N<1!"); alglib.ap.assert(m>=4, "Spline1DFitPenalized: M<4!"); alglib.ap.assert(alglib.ap.len(x)>=n, "Spline1DFitPenalized: Length(X)=n, "Spline1DFitPenalized: Length(Y)0 * if given, only first N elements of X/Y/W are processed * if not given, automatically determined from X/Y/W sizes M - number of basis functions ( = number_of_nodes), M>=4. Rho - regularization constant passed by user. It penalizes nonlinearity in the regression spline. It is logarithmically scaled, i.e. actual value of regularization constant is calculated as 10^Rho. It is automatically scaled so that: * Rho=2.0 corresponds to moderate amount of nonlinearity * generally, it should be somewhere in the [-8.0,+8.0] If you do not want to penalize nonlineary, pass small Rho. Values as low as -15 should work. OUTPUT PARAMETERS: Info- same format as in LSFitLinearWC() subroutine. * Info>0 task is solved * Info<=0 an error occured: -4 means inconvergence of internal SVD or Cholesky decomposition; problem may be too ill-conditioned (very rare) S - spline interpolant. Rep - Following fields are set: * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). 
* AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED IMPORTANT: this subroitine doesn't calculate task's condition number for K<>0. NOTE 1: additional nodes are added to the spline outside of the fitting interval to force linearity when xmax(x,xc). It is done for consistency - we penalize non-linearity at [min(x,xc),max(x,xc)], so it is natural to force linearity outside of this interval. NOTE 2: function automatically sorts points, so caller may pass unsorted array. -- ALGLIB PROJECT -- Copyright 19.10.2010 by Bochkanov Sergey *************************************************************************/ public static void spline1dfitpenalizedw(double[] x, double[] y, double[] w, int n, int m, double rho, ref int info, spline1dinterpolant s, spline1dfitreport rep, alglib.xparams _params) { int i = 0; int j = 0; int b = 0; double v = 0; double relcnt = 0; double xa = 0; double xb = 0; double sa = 0; double sb = 0; double[] xoriginal = new double[0]; double[] yoriginal = new double[0]; double pdecay = 0; double tdecay = 0; double[,] fmatrix = new double[0,0]; double[] fcolumn = new double[0]; double[] y2 = new double[0]; double[] w2 = new double[0]; double[] xc = new double[0]; double[] yc = new double[0]; int[] dc = new int[0]; double fdmax = 0; double admax = 0; double[,] amatrix = new double[0,0]; double[,] d2matrix = new double[0,0]; double fa = 0; double ga = 0; double fb = 0; double gb = 0; double lambdav = 0; double[] bx = new double[0]; double[] by = new double[0]; double[] bd1 = new double[0]; double[] bd2 = new double[0]; double[] tx = new double[0]; double[] ty = new double[0]; double[] td = new double[0]; spline1dinterpolant bs = new spline1dinterpolant(); double[,] nmatrix = new double[0,0]; double[] rightpart = new double[0]; fbls.fblslincgstate cgstate = new fbls.fblslincgstate(); double[] c = new double[0]; double[] tmp0 = new double[0]; int i_ = 0; int i1_ = 0; x = (double[])x.Clone(); y = (double[])y.Clone(); w = (double[])w.Clone(); info = 0; alglib.ap.assert(n>=1, "Spline1DFitPenalizedW: N<1!"); alglib.ap.assert(m>=4, "Spline1DFitPenalizedW: M<4!"); alglib.ap.assert(alglib.ap.len(x)>=n, "Spline1DFitPenalizedW: Length(X)=n, "Spline1DFitPenalizedW: Length(Y)=n, "Spline1DFitPenalizedW: Length(W)(double)(v) ) { rho = v; } lambdav = Math.Pow(10, rho); // // Sort X, Y, W // heapsortdpoints(ref x, ref y, ref w, n, _params); // // Scale X, Y, XC, YC // intfitserv.lsfitscalexy(ref x, ref y, ref w, n, ref xc, ref yc, dc, 0, ref xa, ref xb, ref sa, ref sb, ref xoriginal, ref yoriginal, _params); // // Allocate space // fmatrix = new double[n, m]; amatrix = new double[m, m]; d2matrix = new double[m, m]; bx = new double[m]; by = new double[m]; fcolumn = new double[n]; nmatrix = new double[m, m]; rightpart = new double[m]; tmp0 = new double[Math.Max(m, n)]; c = new double[m]; // // Fill: // * FMatrix by values of basis functions // * TmpAMatrix by second derivatives of I-th function at J-th point // * CMatrix by constraints // fdmax = 0; for(b=0; b<=m-1; b++) { // // Prepare I-th basis function // for(j=0; j<=m-1; j++) { bx[j] = (double)(2*j)/(double)(m-1)-1; by[j] = 0; } by[b] = 1; spline1dgriddiff2cubic(bx, by, m, 2, 0.0, 2, 0.0, ref bd1, ref bd2, _params); spline1dbuildcubic(bx, by, m, 2, 0.0, 2, 0.0, bs, _params); // // Calculate B-th column of FMatrix // Update FDMax (maximum column norm) // spline1dconvcubic(bx, by, m, 2, 0.0, 2, 0.0, x, n, ref fcolumn, _params); for(i_=0; i_<=n-1;i_++) { fmatrix[i_,b] = fcolumn[i_]; } v 
= 0; for(i=0; i<=n-1; i++) { v = v+math.sqr(w[i]*fcolumn[i]); } fdmax = Math.Max(fdmax, v); // // Fill temporary with second derivatives of basis function // for(i_=0; i_<=m-1;i_++) { d2matrix[b,i_] = bd2[i_]; } } // // * calculate penalty matrix A // * calculate max of diagonal elements of A // * calculate PDecay - coefficient before penalty matrix // for(i=0; i<=m-1; i++) { for(j=i; j<=m-1; j++) { // // calculate integral(B_i''*B_j'') where B_i and B_j are // i-th and j-th basis splines. // B_i and B_j are piecewise linear functions. // v = 0; for(b=0; b<=m-2; b++) { fa = d2matrix[i,b]; fb = d2matrix[i,b+1]; ga = d2matrix[j,b]; gb = d2matrix[j,b+1]; v = v+(bx[b+1]-bx[b])*(fa*ga+(fa*(gb-ga)+ga*(fb-fa))/2+(fb-fa)*(gb-ga)/3); } amatrix[i,j] = v; amatrix[j,i] = v; } } admax = 0; for(i=0; i<=m-1; i++) { admax = Math.Max(admax, Math.Abs(amatrix[i,i])); } pdecay = lambdav*fdmax/admax; // // Calculate TDecay for Tikhonov regularization // tdecay = fdmax*(1+pdecay)*10*math.machineepsilon; // // Prepare system // // NOTE: FMatrix is spoiled during this process // for(i=0; i<=n-1; i++) { v = w[i]; for(i_=0; i_<=m-1;i_++) { fmatrix[i,i_] = v*fmatrix[i,i_]; } } ablas.rmatrixgemm(m, m, n, 1.0, fmatrix, 0, 0, 1, fmatrix, 0, 0, 0, 0.0, nmatrix, 0, 0, _params); for(i=0; i<=m-1; i++) { for(j=0; j<=m-1; j++) { nmatrix[i,j] = nmatrix[i,j]+pdecay*amatrix[i,j]; } } for(i=0; i<=m-1; i++) { nmatrix[i,i] = nmatrix[i,i]+tdecay; } for(i=0; i<=m-1; i++) { rightpart[i] = 0; } for(i=0; i<=n-1; i++) { v = y[i]*w[i]; for(i_=0; i_<=m-1;i_++) { rightpart[i_] = rightpart[i_] + v*fmatrix[i,i_]; } } // // Solve system // if( !trfac.spdmatrixcholesky(ref nmatrix, m, true, _params) ) { info = -4; return; } fbls.fblscholeskysolve(nmatrix, 1.0, m, true, rightpart, ref tmp0, _params); for(i_=0; i_<=m-1;i_++) { c[i_] = rightpart[i_]; } // // add nodes to force linearity outside of the fitting interval // spline1dgriddiffcubic(bx, c, m, 2, 0.0, 2, 0.0, ref bd1, _params); tx = new double[m+2]; ty = new double[m+2]; td = new double[m+2]; i1_ = (0) - (1); for(i_=1; i_<=m;i_++) { tx[i_] = bx[i_+i1_]; } i1_ = (0) - (1); for(i_=1; i_<=m;i_++) { ty[i_] = rightpart[i_+i1_]; } i1_ = (0) - (1); for(i_=1; i_<=m;i_++) { td[i_] = bd1[i_+i1_]; } tx[0] = tx[1]-(tx[2]-tx[1]); ty[0] = ty[1]-td[1]*(tx[2]-tx[1]); td[0] = td[1]; tx[m+1] = tx[m]+(tx[m]-tx[m-1]); ty[m+1] = ty[m]+td[m]*(tx[m]-tx[m-1]); td[m+1] = td[m]; spline1dbuildhermite(tx, ty, td, m+2, s, _params); spline1dlintransx(s, 2/(xb-xa), -((xa+xb)/(xb-xa)), _params); spline1dlintransy(s, sb-sa, sa, _params); info = 1; // // Fill report // rep.rmserror = 0; rep.avgerror = 0; rep.avgrelerror = 0; rep.maxerror = 0; relcnt = 0; spline1dconvcubic(bx, rightpart, m, 2, 0.0, 2, 0.0, x, n, ref fcolumn, _params); for(i=0; i<=n-1; i++) { v = (sb-sa)*fcolumn[i]+sa; rep.rmserror = rep.rmserror+math.sqr(v-yoriginal[i]); rep.avgerror = rep.avgerror+Math.Abs(v-yoriginal[i]); if( (double)(yoriginal[i])!=(double)(0) ) { rep.avgrelerror = rep.avgrelerror+Math.Abs(v-yoriginal[i])/Math.Abs(yoriginal[i]); relcnt = relcnt+1; } rep.maxerror = Math.Max(rep.maxerror, Math.Abs(v-yoriginal[i])); } rep.rmserror = Math.Sqrt(rep.rmserror/n); rep.avgerror = rep.avgerror/n; if( (double)(relcnt)!=(double)(0) ) { rep.avgrelerror = rep.avgrelerror/relcnt; } } /************************************************************************* Internal version of Spline1DConvDiff Converts from Hermite spline given by grid XOld to new grid X2 INPUT PARAMETERS: XOld - old grid YOld - values at old grid DOld - first derivative at old grid 
N - grid size X2 - new grid N2 - new grid size Y - possibly preallocated output array (reallocate if too small) NeedY - do we need Y? D1 - possibly preallocated output array (reallocate if too small) NeedD1 - do we need D1? D2 - possibly preallocated output array (reallocate if too small) NeedD2 - do we need D1? OUTPUT ARRAYS: Y - values, if needed D1 - first derivative, if needed D2 - second derivative, if needed -- ALGLIB PROJECT -- Copyright 03.09.2010 by Bochkanov Sergey *************************************************************************/ public static void spline1dconvdiffinternal(double[] xold, double[] yold, double[] dold, int n, double[] x2, int n2, ref double[] y, bool needy, ref double[] d1, bool needd1, ref double[] d2, bool needd2, alglib.xparams _params) { int intervalindex = 0; int pointindex = 0; bool havetoadvance = new bool(); double c0 = 0; double c1 = 0; double c2 = 0; double c3 = 0; double a = 0; double b = 0; double w = 0; double w2 = 0; double w3 = 0; double fa = 0; double fb = 0; double da = 0; double db = 0; double t = 0; // // Prepare space // if( needy && alglib.ap.len(y)=n2 ) { break; } t = x2[pointindex]; // // do we need to advance interval? // havetoadvance = false; if( intervalindex==-1 ) { havetoadvance = true; } else { if( intervalindex=(double)(b); } } if( havetoadvance ) { intervalindex = intervalindex+1; a = xold[intervalindex]; b = xold[intervalindex+1]; w = b-a; w2 = w*w; w3 = w*w2; fa = yold[intervalindex]; fb = yold[intervalindex+1]; da = dold[intervalindex]; db = dold[intervalindex+1]; c0 = fa; c1 = da; c2 = (3*(fb-fa)-2*da*w-db*w)/w2; c3 = (2*(fa-fb)+da*w+db*w)/w3; continue; } // // Calculate spline and its derivatives using power basis // t = t-a; if( needy ) { y[pointindex] = c0+t*(c1+t*(c2+t*c3)); } if( needd1 ) { d1[pointindex] = c1+2*t*c2+3*t*t*c3; } if( needd2 ) { d2[pointindex] = 2*c2+6*t*c3; } pointindex = pointindex+1; } } /************************************************************************* This function finds all roots and extrema of the spline S(x) defined at [A,B] (interval which contains spline nodes). It does not extrapolates function, so roots and extrema located outside of [A,B] will not be found. It returns all isolated (including multiple) roots and extrema. INPUT PARAMETERS C - spline interpolant OUTPUT PARAMETERS R - array[NR], contains roots of the spline. In case there is no roots, this array has zero length. NR - number of roots, >=0 DR - is set to True in case there is at least one interval where spline is just a zero constant. Such degenerate cases are not reported in the R/NR E - array[NE], contains extrema (maximums/minimums) of the spline. In case there is no extrema, this array has zero length. ET - array[NE], extrema types: * ET[i]>0 in case I-th extrema is a minimum * ET[i]<0 in case I-th extrema is a maximum NE - number of extrema, >=0 DE - is set to True in case there is at least one interval where spline is a constant. Such degenerate cases are not reported in the E/NE. NOTES: 1. This function does NOT report following kinds of roots: * intervals where function is constantly zero * roots which are outside of [A,B] (note: it CAN return A or B) 2. 
This function does NOT report following kinds of extrema: * intervals where function is a constant * extrema which are outside of (A,B) (note: it WON'T return A or B) -- ALGLIB PROJECT -- Copyright 26.09.2011 by Bochkanov Sergey *************************************************************************/ public static void spline1drootsandextrema(spline1dinterpolant c, ref double[] r, ref int nr, ref bool dr, ref double[] e, ref int[] et, ref int ne, ref bool de, alglib.xparams _params) { double pl = 0; double ml = 0; double pll = 0; double pr = 0; double mr = 0; double[] tr = new double[0]; double[] tmpr = new double[0]; double[] tmpe = new double[0]; int[] tmpet = new int[0]; double[] tmpc = new double[0]; double x0 = 0; double x1 = 0; double x2 = 0; double ex0 = 0; double ex1 = 0; int tne = 0; int tnr = 0; int i = 0; int j = 0; bool nstep = new bool(); r = new double[0]; nr = 0; dr = new bool(); e = new double[0]; et = new int[0]; ne = 0; de = new bool(); // //exception handling // alglib.ap.assert(c.k==3, "Spline1DRootsAndExtrema : incorrect parameter C.K!"); alglib.ap.assert(c.continuity>=0, "Spline1DRootsAndExtrema : parameter C.Continuity must not be less than 0!"); // //initialization of variable // nr = 0; ne = 0; dr = false; de = false; nstep = true; // //consider case, when C.Continuty=0 // if( c.continuity==0 ) { // //allocation for auxiliary arrays //'TmpR ' - it stores a time value for roots //'TmpE ' - it stores a time value for extremums //'TmpET '- it stores a time value for extremums type // apserv.rvectorsetlengthatleast(ref tmpr, 3*(c.n-1), _params); apserv.rvectorsetlengthatleast(ref tmpe, 2*(c.n-1), _params); apserv.ivectorsetlengthatleast(ref tmpet, 2*(c.n-1), _params); // //start calculating // for(i=0; i<=c.n-2; i++) { // //initialization pL, mL, pR, mR // pl = c.c[4*i]; ml = c.c[4*i+1]; pr = c.c[4*(i+1)]; mr = c.c[4*i+1]+2*c.c[4*i+2]*(c.x[i+1]-c.x[i])+3*c.c[4*i+3]*(c.x[i+1]-c.x[i])*(c.x[i+1]-c.x[i]); // //pre-searching roots and extremums // solvecubicpolinom(pl, ml, pr, mr, c.x[i], c.x[i+1], ref x0, ref x1, ref x2, ref ex0, ref ex1, ref tnr, ref tne, ref tr, _params); dr = dr || tnr==-1; de = de || tne==-1; // //searching of roots // if( tnr==1 && nstep ) { // //is there roots? // if( nr>0 ) { // //is a next root equal a previous root? 
//if is't, then write new root // if( (double)(x0)!=(double)(tmpr[nr-1]) ) { tmpr[nr] = x0; nr = nr+1; } } else { // //write a first root // tmpr[nr] = x0; nr = nr+1; } } else { // //case when function at a segment identically to zero //then we have to clear a root, if the one located on a //constant segment // if( tnr==-1 ) { // //safe state variable as constant // if( nstep ) { nstep = false; } // //clear the root, if there is // if( nr>0 ) { if( (double)(c.x[i])==(double)(tmpr[nr-1]) ) { nr = nr-1; } } // //change state for 'DR' // if( !dr ) { dr = true; } } else { nstep = true; } } // //searching of extremums // if( i>0 ) { pll = c.c[4*(i-1)]; // //if pL=pLL or pL=pR then // if( tne==-1 ) { if( !de ) { de = true; } } else { if( (double)(pl)>(double)(pll) && (double)(pl)>(double)(pr) ) { // //maximum // tmpet[ne] = -1; tmpe[ne] = c.x[i]; ne = ne+1; } else { if( (double)(pl)<(double)(pll) && (double)(pl)<(double)(pr) ) { // //minimum // tmpet[ne] = 1; tmpe[ne] = c.x[i]; ne = ne+1; } } } } } // //write final result // apserv.rvectorsetlengthatleast(ref r, nr, _params); apserv.rvectorsetlengthatleast(ref e, ne, _params); apserv.ivectorsetlengthatleast(ref et, ne, _params); // //write roots // for(i=0; i<=nr-1; i++) { r[i] = tmpr[i]; } // //write extremums and their types // for(i=0; i<=ne-1; i++) { e[i] = tmpe[i]; et[i] = tmpet[i]; } } else { // //case, when C.Continuity>=1 //'TmpR ' - it stores a time value for roots //'TmpC' - it stores a time value for extremums and //their function value (TmpC={EX0,F(EX0), EX1,F(EX1), ..., EXn,F(EXn)};) //'TmpE' - it stores a time value for extremums only //'TmpET'- it stores a time value for extremums type // apserv.rvectorsetlengthatleast(ref tmpr, 2*c.n-1, _params); apserv.rvectorsetlengthatleast(ref tmpc, 4*c.n, _params); apserv.rvectorsetlengthatleast(ref tmpe, 2*c.n, _params); apserv.ivectorsetlengthatleast(ref tmpet, 2*c.n, _params); // //start calculating // for(i=0; i<=c.n-2; i++) { // //we calculate pL,mL, pR,mR as Fi+1(F'i+1) at left border // pl = c.c[4*i]; ml = c.c[4*i+1]; pr = c.c[4*(i+1)]; mr = c.c[4*(i+1)+1]; // //calculating roots and extremums at [X[i],X[i+1]] // solvecubicpolinom(pl, ml, pr, mr, c.x[i], c.x[i+1], ref x0, ref x1, ref x2, ref ex0, ref ex1, ref tnr, ref tne, ref tr, _params); // //searching roots // if( tnr>0 ) { // //re-init tR // if( tnr>=1 ) { tr[0] = x0; } if( tnr>=2 ) { tr[1] = x1; } if( tnr==3 ) { tr[2] = x2; } // //start root selection // if( nr>0 ) { if( (double)(tmpr[nr-1])!=(double)(x0) ) { // //previous segment was't constant identical zero // if( nstep ) { for(j=0; j<=tnr-1; j++) { tmpr[nr+j] = tr[j]; } nr = nr+tnr; } else { // //previous segment was constant identical zero //and we must ignore [NR+j-1] root // for(j=1; j<=tnr-1; j++) { tmpr[nr+j-1] = tr[j]; } nr = nr+tnr-1; nstep = true; } } else { for(j=1; j<=tnr-1; j++) { tmpr[nr+j-1] = tr[j]; } nr = nr+tnr-1; } } else { // //write first root // for(j=0; j<=tnr-1; j++) { tmpr[nr+j] = tr[j]; } nr = nr+tnr; } } else { if( tnr==-1 ) { // //decrement 'NR' if at previous step was writen a root //(previous segment identical zero) // if( nr>0 && nstep ) { nr = nr-1; } // //previous segment is't constant // if( nstep ) { nstep = false; } // //rewrite 'DR' // if( !dr ) { dr = true; } } } // //searching extremums //write all term like extremums // if( tne==1 ) { if( ne>0 ) { // //just ignore identical extremums //because he must be one // if( (double)(tmpc[ne-2])!=(double)(ex0) ) { tmpc[ne] = ex0; tmpc[ne+1] = 
c.c[4*i]+c.c[4*i+1]*(ex0-c.x[i])+c.c[4*i+2]*(ex0-c.x[i])*(ex0-c.x[i])+c.c[4*i+3]*(ex0-c.x[i])*(ex0-c.x[i])*(ex0-c.x[i]); ne = ne+2; } } else { // //write first extremum and it function value // tmpc[ne] = ex0; tmpc[ne+1] = c.c[4*i]+c.c[4*i+1]*(ex0-c.x[i])+c.c[4*i+2]*(ex0-c.x[i])*(ex0-c.x[i])+c.c[4*i+3]*(ex0-c.x[i])*(ex0-c.x[i])*(ex0-c.x[i]); ne = ne+2; } } else { if( tne==2 ) { if( ne>0 ) { // //ignore identical extremum // if( (double)(tmpc[ne-2])!=(double)(ex0) ) { tmpc[ne] = ex0; tmpc[ne+1] = c.c[4*i]+c.c[4*i+1]*(ex0-c.x[i])+c.c[4*i+2]*(ex0-c.x[i])*(ex0-c.x[i])+c.c[4*i+3]*(ex0-c.x[i])*(ex0-c.x[i])*(ex0-c.x[i]); ne = ne+2; } } else { // //write first extremum // tmpc[ne] = ex0; tmpc[ne+1] = c.c[4*i]+c.c[4*i+1]*(ex0-c.x[i])+c.c[4*i+2]*(ex0-c.x[i])*(ex0-c.x[i])+c.c[4*i+3]*(ex0-c.x[i])*(ex0-c.x[i])*(ex0-c.x[i]); ne = ne+2; } // //write second extremum // tmpc[ne] = ex1; tmpc[ne+1] = c.c[4*i]+c.c[4*i+1]*(ex1-c.x[i])+c.c[4*i+2]*(ex1-c.x[i])*(ex1-c.x[i])+c.c[4*i+3]*(ex1-c.x[i])*(ex1-c.x[i])*(ex1-c.x[i]); ne = ne+2; } else { if( tne==-1 ) { if( !de ) { de = true; } } } } } // //checking of arrays //get number of extremums (tNe=NE/2) //initialize pL as value F0(X[0]) and //initialize pR as value Fn-1(X[N]) // tne = ne/2; ne = 0; pl = c.c[0]; pr = c.c[4*(c.n-1)]; for(i=0; i<=tne-1; i++) { if( i>0 && i(double)(tmpc[2*(i-1)+1]) && (double)(tmpc[2*i+1])>(double)(tmpc[2*(i+1)+1]) ) { // //maximum // tmpe[ne] = tmpc[2*i]; tmpet[ne] = -1; ne = ne+1; } else { if( (double)(tmpc[2*i+1])<(double)(tmpc[2*(i-1)+1]) && (double)(tmpc[2*i+1])<(double)(tmpc[2*(i+1)+1]) ) { // //minimum // tmpe[ne] = tmpc[2*i]; tmpet[ne] = 1; ne = ne+1; } } } else { if( i==0 ) { if( (double)(tmpc[2*i])!=(double)(c.x[0]) ) { if( (double)(tmpc[2*i+1])>(double)(pl) && (double)(tmpc[2*i+1])>(double)(tmpc[2*(i+1)+1]) ) { // //maximum // tmpe[ne] = tmpc[2*i]; tmpet[ne] = -1; ne = ne+1; } else { if( (double)(tmpc[2*i+1])<(double)(pl) && (double)(tmpc[2*i+1])<(double)(tmpc[2*(i+1)+1]) ) { // //minimum // tmpe[ne] = tmpc[2*i]; tmpet[ne] = 1; ne = ne+1; } } } } else { if( i==tne-1 ) { if( (double)(tmpc[2*i])!=(double)(c.x[c.n-1]) ) { if( (double)(tmpc[2*i+1])>(double)(tmpc[2*(i-1)+1]) && (double)(tmpc[2*i+1])>(double)(pr) ) { // //maximum // tmpe[ne] = tmpc[2*i]; tmpet[ne] = -1; ne = ne+1; } else { if( (double)(tmpc[2*i+1])<(double)(tmpc[2*(i-1)+1]) && (double)(tmpc[2*i+1])<(double)(pr) ) { // //minimum // tmpe[ne] = tmpc[2*i]; tmpet[ne] = 1; ne = ne+1; } } } } } } } // //final results //allocate R, E, ET // apserv.rvectorsetlengthatleast(ref r, nr, _params); apserv.rvectorsetlengthatleast(ref e, ne, _params); apserv.ivectorsetlengthatleast(ref et, ne, _params); // //write result for extremus and their types // for(i=0; i<=ne-1; i++) { e[i] = tmpe[i]; et[i] = tmpet[i]; } // //write result for roots // for(i=0; i<=nr-1; i++) { r[i] = tmpr[i]; } } } /************************************************************************* Internal subroutine. Heap sort. 
*************************************************************************/ public static void heapsortdpoints(ref double[] x, ref double[] y, ref double[] d, int n, alglib.xparams _params) { double[] rbuf = new double[0]; int[] ibuf = new int[0]; double[] rbuf2 = new double[0]; int[] ibuf2 = new int[0]; int i = 0; int i_ = 0; ibuf = new int[n]; rbuf = new double[n]; for(i=0; i<=n-1; i++) { ibuf[i] = i; } tsort.tagsortfasti(ref x, ref ibuf, ref rbuf2, ref ibuf2, n, _params); for(i=0; i<=n-1; i++) { rbuf[i] = y[ibuf[i]]; } for(i_=0; i_<=n-1;i_++) { y[i_] = rbuf[i_]; } for(i=0; i<=n-1; i++) { rbuf[i] = d[ibuf[i]]; } for(i_=0; i_<=n-1;i_++) { d[i_] = rbuf[i_]; } } /************************************************************************* This procedure search roots of an quadratic equation inside [0;1] and it number of roots. INPUT PARAMETERS: P0 - value of a function at 0 M0 - value of a derivative at 0 P1 - value of a function at 1 M1 - value of a derivative at 1 OUTPUT PARAMETERS: X0 - first root of an equation X1 - second root of an equation NR - number of roots RESTRICTIONS OF PARAMETERS: Parameters for this procedure has't to be zero simultaneously. Is expected, that input polinom is't degenerate or constant identicaly ZERO. REMARK: The procedure always fill value for X1 and X2, even if it is't belongs to [0;1]. But first true root(even if existing one) is in X1. Number of roots is NR. -- ALGLIB PROJECT -- Copyright 26.09.2011 by Bochkanov Sergey *************************************************************************/ public static void solvepolinom2(double p0, double m0, double p1, double m1, ref double x0, ref double x1, ref int nr, alglib.xparams _params) { double a = 0; double b = 0; double c = 0; double dd = 0; double tmp = 0; double exf = 0; double extr = 0; x0 = 0; x1 = 0; nr = 0; // //calculate parameters for equation: A, B and C // a = 6*p0+3*m0-6*p1+3*m1; b = -(6*p0)-4*m0+6*p1-2*m1; c = m0; // //check case, when A=0 //we are considering the linear equation // if( (double)(a)==(double)(0) ) { // //B<>0 and root inside [0;1] //one root // if( ((double)(b)!=(double)(0) && Math.Sign(c)*Math.Sign(b)<=0) && (double)(Math.Abs(b))>=(double)(Math.Abs(c)) ) { x0 = -(c/b); nr = 1; return; } else { nr = 0; return; } } // //consider case, when extremumu outside (0;1) //exist one root only // if( (double)(Math.Abs(2*a))<=(double)(Math.Abs(b)) || Math.Sign(b)*Math.Sign(a)>=0 ) { if( Math.Sign(m0)*Math.Sign(m1)>0 ) { nr = 0; return; } // //consider case, when the one exist //same sign of derivative // if( Math.Sign(m0)*Math.Sign(m1)<0 ) { nr = 1; extr = -(b/(2*a)); dd = b*b-4*a*c; if( (double)(dd)<(double)(0) ) { return; } x0 = (-b-Math.Sqrt(dd))/(2*a); x1 = (-b+Math.Sqrt(dd))/(2*a); if( ((double)(extr)>=(double)(1) && (double)(x1)<=(double)(extr)) || ((double)(extr)<=(double)(0) && (double)(x1)>=(double)(extr)) ) { x0 = x1; } return; } // //consider case, when the one is 0 // if( (double)(m0)==(double)(0) ) { x0 = 0; nr = 1; return; } if( (double)(m1)==(double)(0) ) { x0 = 1; nr = 1; return; } } else { // //consider case, when both of derivatives is 0 // if( (double)(m0)==(double)(0) && (double)(m1)==(double)(0) ) { x0 = 0; x1 = 1; nr = 2; return; } // //consider case, when derivative at 0 is 0, and derivative at 1 is't 0 // if( (double)(m0)==(double)(0) && (double)(m1)!=(double)(0) ) { dd = b*b-4*a*c; if( (double)(dd)<(double)(0) ) { x0 = 0; nr = 1; return; } x0 = (-b-Math.Sqrt(dd))/(2*a); x1 = (-b+Math.Sqrt(dd))/(2*a); extr = -(b/(2*a)); exf = a*extr*extr+b*extr+c; if( 
Math.Sign(exf)*Math.Sign(m1)>0 ) { x0 = 0; nr = 1; return; } else { if( (double)(extr)>(double)(x0) ) { x0 = 0; } else { x1 = 0; } nr = 2; // //roots must placed ascending // if( (double)(x0)>(double)(x1) ) { tmp = x0; x0 = x1; x1 = tmp; } return; } } if( (double)(m1)==(double)(0) && (double)(m0)!=(double)(0) ) { dd = b*b-4*a*c; if( (double)(dd)<(double)(0) ) { x0 = 1; nr = 1; return; } x0 = (-b-Math.Sqrt(dd))/(2*a); x1 = (-b+Math.Sqrt(dd))/(2*a); extr = -(b/(2*a)); exf = a*extr*extr+b*extr+c; if( Math.Sign(exf)*Math.Sign(m0)>0 ) { x0 = 1; nr = 1; return; } else { if( (double)(extr)<(double)(x0) ) { x0 = 1; } else { x1 = 1; } nr = 2; // //roots must placed ascending // if( (double)(x0)>(double)(x1) ) { tmp = x0; x0 = x1; x1 = tmp; } return; } } else { extr = -(b/(2*a)); exf = a*extr*extr+b*extr+c; if( Math.Sign(exf)*Math.Sign(m0)>0 && Math.Sign(exf)*Math.Sign(m1)>0 ) { nr = 0; return; } dd = b*b-4*a*c; if( (double)(dd)<(double)(0) ) { nr = 0; return; } x0 = (-b-Math.Sqrt(dd))/(2*a); x1 = (-b+Math.Sqrt(dd))/(2*a); // //if EXF and m0, EXF and m1 has different signs, then equation has two roots // if( Math.Sign(exf)*Math.Sign(m0)<0 && Math.Sign(exf)*Math.Sign(m1)<0 ) { nr = 2; // //roots must placed ascending // if( (double)(x0)>(double)(x1) ) { tmp = x0; x0 = x1; x1 = tmp; } return; } else { nr = 1; if( Math.Sign(exf)*Math.Sign(m0)<0 ) { if( (double)(x1)<(double)(extr) ) { x0 = x1; } return; } if( Math.Sign(exf)*Math.Sign(m1)<0 ) { if( (double)(x1)>(double)(extr) ) { x0 = x1; } return; } } } } } /************************************************************************* This procedure search roots of an cubic equation inside [A;B], it number of roots and number of extremums. INPUT PARAMETERS: pA - value of a function at A mA - value of a derivative at A pB - value of a function at B mB - value of a derivative at B A0 - left border [A0;B0] B0 - right border [A0;B0] OUTPUT PARAMETERS: X0 - first root of an equation X1 - second root of an equation X2 - third root of an equation EX0 - first extremum of a function EX0 - second extremum of a function NR - number of roots NR - number of extrmums RESTRICTIONS OF PARAMETERS: Length of [A;B] must be positive and is't zero, i.e. 
A<>B and AB // alglib.ap.assert((double)(a)<(double)(b), "\nSolveCubicPolinom: incorrect borders for [A;B]!\n"); // //case 1 //function can be identicaly to ZERO // if( (((double)(ma)==(double)(0) && (double)(mb)==(double)(0)) && (double)(pa)==(double)(pb)) && (double)(pa)==(double)(0) ) { nr = -1; ne = -1; return; } if( ((double)(ma)==(double)(0) && (double)(mb)==(double)(0)) && (double)(pa)==(double)(pb) ) { nr = 0; ne = -1; return; } tmpma = ma*(b-a); tmpmb = mb*(b-a); solvepolinom2(pa, tmpma, pb, tmpmb, ref ex0, ref ex1, ref ne, _params); ex0 = rescaleval(0, 1, a, b, ex0, _params); ex1 = rescaleval(0, 1, a, b, ex1, _params); // //case 3.1 //no extremums at [A;B] // if( ne==0 ) { nr = bisectmethod(pa, tmpma, pb, tmpmb, 0, 1, ref x0, _params); if( nr==1 ) { x0 = rescaleval(0, 1, a, b, x0, _params); } return; } // //case 3.2 //one extremum // if( ne==1 ) { if( (double)(ex0)==(double)(a) || (double)(ex0)==(double)(b) ) { nr = bisectmethod(pa, tmpma, pb, tmpmb, 0, 1, ref x0, _params); if( nr==1 ) { x0 = rescaleval(0, 1, a, b, x0, _params); } return; } else { nr = 0; i = 0; tex0 = rescaleval(a, b, 0, 1, ex0, _params); nr = bisectmethod(pa, tmpma, pb, tmpmb, 0, tex0, ref x0, _params)+nr; if( nr>i ) { tempdata[i] = rescaleval(0, tex0, a, ex0, x0, _params); i = i+1; } nr = bisectmethod(pa, tmpma, pb, tmpmb, tex0, 1, ref x0, _params)+nr; if( nr>i ) { x0 = rescaleval(tex0, 1, ex0, b, x0, _params); if( i>0 ) { if( (double)(x0)!=(double)(tempdata[i-1]) ) { tempdata[i] = x0; i = i+1; } else { nr = nr-1; } } else { tempdata[i] = x0; i = i+1; } } if( nr>0 ) { x0 = tempdata[0]; if( nr>1 ) { x1 = tempdata[1]; } return; } } return; } else { // //case 3.3 //two extremums(or more, but it's impossible) // // //case 3.3.0 //both extremums at the border // if( (double)(ex0)==(double)(a) && (double)(ex1)==(double)(b) ) { nr = bisectmethod(pa, tmpma, pb, tmpmb, 0, 1, ref x0, _params); if( nr==1 ) { x0 = rescaleval(0, 1, a, b, x0, _params); } return; } if( (double)(ex0)==(double)(a) && (double)(ex1)!=(double)(b) ) { nr = 0; i = 0; tex1 = rescaleval(a, b, 0, 1, ex1, _params); nr = bisectmethod(pa, tmpma, pb, tmpmb, 0, tex1, ref x0, _params)+nr; if( nr>i ) { tempdata[i] = rescaleval(0, tex1, a, ex1, x0, _params); i = i+1; } nr = bisectmethod(pa, tmpma, pb, tmpmb, tex1, 1, ref x0, _params)+nr; if( nr>i ) { x0 = rescaleval(tex1, 1, ex1, b, x0, _params); if( (double)(x0)!=(double)(tempdata[i-1]) ) { tempdata[i] = x0; i = i+1; } else { nr = nr-1; } } if( nr>0 ) { x0 = tempdata[0]; if( nr>1 ) { x1 = tempdata[1]; } return; } } if( (double)(ex1)==(double)(b) && (double)(ex0)!=(double)(a) ) { nr = 0; i = 0; tex0 = rescaleval(a, b, 0, 1, ex0, _params); nr = bisectmethod(pa, tmpma, pb, tmpmb, 0, tex0, ref x0, _params)+nr; if( nr>i ) { tempdata[i] = rescaleval(0, tex0, a, ex0, x0, _params); i = i+1; } nr = bisectmethod(pa, tmpma, pb, tmpmb, tex0, 1, ref x0, _params)+nr; if( nr>i ) { x0 = rescaleval(tex0, 1, ex0, b, x0, _params); if( i>0 ) { if( (double)(x0)!=(double)(tempdata[i-1]) ) { tempdata[i] = x0; i = i+1; } else { nr = nr-1; } } else { tempdata[i] = x0; i = i+1; } } if( nr>0 ) { x0 = tempdata[0]; if( nr>1 ) { x1 = tempdata[1]; } return; } } else { // //case 3.3.2 //both extremums inside (0;1) // nr = 0; i = 0; tex0 = rescaleval(a, b, 0, 1, ex0, _params); tex1 = rescaleval(a, b, 0, 1, ex1, _params); nr = bisectmethod(pa, tmpma, pb, tmpmb, 0, tex0, ref x0, _params)+nr; if( nr>i ) { tempdata[i] = rescaleval(0, tex0, a, ex0, x0, _params); i = i+1; } nr = bisectmethod(pa, tmpma, pb, tmpmb, tex0, tex1, ref x0, _params)+nr; 
if( nr>i ) { x0 = rescaleval(tex0, tex1, ex0, ex1, x0, _params); if( i>0 ) { if( (double)(x0)!=(double)(tempdata[i-1]) ) { tempdata[i] = x0; i = i+1; } else { nr = nr-1; } } else { tempdata[i] = x0; i = i+1; } } nr = bisectmethod(pa, tmpma, pb, tmpmb, tex1, 1, ref x0, _params)+nr; if( nr>i ) { x0 = rescaleval(tex1, 1, ex1, b, x0, _params); if( i>0 ) { if( (double)(x0)!=(double)(tempdata[i-1]) ) { tempdata[i] = x0; i = i+1; } else { nr = nr-1; } } else { tempdata[i] = x0; i = i+1; } } // //write are found roots // if( nr>0 ) { x0 = tempdata[0]; if( nr>1 ) { x1 = tempdata[1]; } if( nr>2 ) { x2 = tempdata[2]; } return; } } } } /************************************************************************* Function for searching a root at [A;B] by bisection method and return number of roots (0 or 1) INPUT PARAMETERS: pA - value of a function at A mA - value of a derivative at A pB - value of a function at B mB - value of a derivative at B A0 - left border [A0;B0] B0 - right border [A0;B0] RESTRICTIONS OF PARAMETERS: We assume, that B0>A0. REMARK: Assume, that exist one root only at [A;B], else function may be work incorrectly. The function dont check value A0,B0! -- ALGLIB PROJECT -- Copyright 26.09.2011 by Bochkanov Sergey *************************************************************************/ public static int bisectmethod(double pa, double ma, double pb, double mb, double a, double b, ref double x, alglib.xparams _params) { int result = 0; double vacuum = 0; double eps = 0; double a0 = 0; double b0 = 0; double m = 0; double lf = 0; double rf = 0; double mf = 0; x = 0; // //accuracy // eps = 1000*(b-a)*math.machineepsilon; // //initialization left and right borders // a0 = a; b0 = b; // //initialize function value at 'A' and 'B' // hermitecalc(pa, ma, pb, mb, a, ref lf, ref vacuum, _params); hermitecalc(pa, ma, pb, mb, b, ref rf, ref vacuum, _params); // //check, that 'A' and 'B' are't roots, //and that root exist // if( Math.Sign(lf)*Math.Sign(rf)>0 ) { result = 0; return result; } else { if( (double)(lf)==(double)(0) ) { x = a; result = 1; return result; } else { if( (double)(rf)==(double)(0) ) { x = b; result = 1; return result; } } } // //searching a root // do { m = (b0+a0)/2; hermitecalc(pa, ma, pb, mb, a0, ref lf, ref vacuum, _params); hermitecalc(pa, ma, pb, mb, b0, ref rf, ref vacuum, _params); hermitecalc(pa, ma, pb, mb, m, ref mf, ref vacuum, _params); if( Math.Sign(mf)*Math.Sign(lf)<0 ) { b0 = m; } else { if( Math.Sign(mf)*Math.Sign(rf)<0 ) { a0 = m; } else { if( (double)(lf)==(double)(0) ) { x = a0; result = 1; return result; } if( (double)(rf)==(double)(0) ) { x = b0; result = 1; return result; } if( (double)(mf)==(double)(0) ) { x = m; result = 1; return result; } } } } while( (double)(Math.Abs(b0-a0))>=(double)(eps) ); x = m; result = 1; return result; } /************************************************************************* This function builds monotone cubic Hermite interpolant. This interpolant is monotonic in [x(0),x(n-1)] and is constant outside of this interval. In case y[] form non-monotonic sequence, interpolant is piecewise monotonic. Say, for x=(0,1,2,3,4) and y=(0,1,2,1,0) interpolant will monotonically grow at [0..2] and monotonically decrease at [2..4]. INPUT PARAMETERS: X - spline nodes, array[0..N-1]. Subroutine automatically sorts points, so caller may pass unsorted array. Y - function values, array[0..N-1] N - the number of points(N>=2). OUTPUT PARAMETERS: C - spline interpolant. 
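EXAMPLE (a minimal sketch, assuming the standard public wrappers alglib.spline1dbuildmonotone() and alglib.spline1dcalc() from the interface part of ALGLIB, which are not shown in this fragment; the snippet can be pasted into Main() of a program referencing the library). It uses the data from the description above: the interpolant grows on [0,2], decreases on [2,4], and never overshoots the node values.

    double[] x = new double[]{0, 1, 2, 3, 4};
    double[] y = new double[]{0, 1, 2, 1, 0};
    alglib.spline1dinterpolant c;
    alglib.spline1dbuildmonotone(x, y, out c);   // public wrapper assumed, see note above

    // values between nodes stay within the range of the neighbouring nodes,
    // e.g. S(0.5) lies in [0,1] and S(2.5) lies in [1,2]
    double v1 = alglib.spline1dcalc(c, 0.5);
    double v2 = alglib.spline1dcalc(c, 2.5);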
-- ALGLIB PROJECT -- Copyright 21.06.2012 by Bochkanov Sergey *************************************************************************/ public static void spline1dbuildmonotone(double[] x, double[] y, int n, spline1dinterpolant c, alglib.xparams _params) { double[] d = new double[0]; double[] ex = new double[0]; double[] ey = new double[0]; int[] p = new int[0]; double delta = 0; double alpha = 0; double beta = 0; int tmpn = 0; int sn = 0; double ca = 0; double cb = 0; double epsilon = 0; int i = 0; int j = 0; x = (double[])x.Clone(); y = (double[])y.Clone(); // // Check lengths of arguments // alglib.ap.assert(n>=2, "Spline1DBuildMonotone: N<2"); alglib.ap.assert(alglib.ap.len(x)>=n, "Spline1DBuildMonotone: Length(X)=n, "Spline1DBuildMonotone: Length(Y)n-2) ); if( (double)(ca)!=(double)(0) ) { ca = ca/Math.Abs(ca); } i = 0; while( i=(double)(0) ) { tmpn = tmpn+1; } else { ca = cb/Math.Abs(cb); break; } } sn = i+tmpn; alglib.ap.assert(tmpn>=2, "Spline1DBuildMonotone: internal error"); // // Calculate derivatives for current segment // d[i] = 0; d[sn-1] = 0; for(j=i+1; j<=sn-2; j++) { d[j] = ((ey[j]-ey[j-1])/(ex[j]-ex[j-1])+(ey[j+1]-ey[j])/(ex[j+1]-ex[j]))/2; } for(j=i; j<=sn-2; j++) { delta = (ey[j+1]-ey[j])/(ex[j+1]-ex[j]); if( (double)(Math.Abs(delta))<=(double)(epsilon) ) { d[j] = 0; d[j+1] = 0; } else { alpha = d[j]/delta; beta = d[j+1]/delta; if( (double)(alpha)!=(double)(0) ) { cb = alpha*Math.Sqrt(1+math.sqr(beta/alpha)); } else { if( (double)(beta)!=(double)(0) ) { cb = beta; } else { continue; } } if( (double)(cb)>(double)(3) ) { d[j] = 3*alpha*delta/cb; d[j+1] = 3*beta*delta/cb; } } } // // Transition to next segment // i = sn-1; } spline1dbuildhermite(ex, ey, d, n, c, _params); c.continuity = 2; } /************************************************************************* Internal version of Spline1DGridDiffCubic. Accepts pre-ordered X/Y, temporary arrays (which may be preallocated, if you want to save time, or not) and output array (which may be preallocated too). Y is passed as var-parameter because we may need to force last element to be equal to the first one (if periodic boundary conditions are specified). -- ALGLIB PROJECT -- Copyright 03.09.2010 by Bochkanov Sergey *************************************************************************/ private static void spline1dgriddiffcubicinternal(double[] x, ref double[] y, int n, int boundltype, double boundl, int boundrtype, double boundr, ref double[] d, ref double[] a1, ref double[] a2, ref double[] a3, ref double[] b, ref double[] dt, alglib.xparams _params) { int i = 0; int i_ = 0; // // allocate arrays // if( alglib.ap.len(d)=0; k--) { x[k] = (d[k]-c[k]*x[k+1])/b[k]; } } /************************************************************************* Internal subroutine. Cyclic tridiagonal solver. Solves ( B[0] C[0] A[0] ) ( A[1] B[1] C[1] ) ( A[2] B[2] C[2] ) ( .......... ) * X = D ( .......... ) ( A[N-2] B[N-2] C[N-2] ) ( C[N-1] A[N-1] B[N-1] ) *************************************************************************/ private static void solvecyclictridiagonal(double[] a, double[] b, double[] c, double[] d, int n, ref double[] x, alglib.xparams _params) { int k = 0; double alpha = 0; double beta = 0; double gamma = 0; double[] y = new double[0]; double[] z = new double[0]; double[] u = new double[0]; b = (double[])b.Clone(); if( alglib.ap.len(x)A0 and B1>A1. But we chech, that T is inside [A0;B0], and if TB0 then T - B1. 
INPUT PARAMETERS: A0 - left border for segment [A0;B0] from 'T' is converted to [A1;B1] B0 - right border for segment [A0;B0] from 'T' is converted to [A1;B1] A1 - left border for segment [A1;B1] to 'T' is converted from [A0;B0] B1 - right border for segment [A1;B1] to 'T' is converted from [A0;B0] T - the parameter is mapped from [A0;B0] to [A1;B1] Result: is converted value for 'T' from [A0;B0] to [A1;B1] REMARK: The function dont check value A0,B0 and A1,B1! -- ALGLIB PROJECT -- Copyright 26.09.2011 by Bochkanov Sergey *************************************************************************/ private static double rescaleval(double a0, double b0, double a1, double b1, double t, alglib.xparams _params) { double result = 0; // //return left border // if( (double)(t)<=(double)(a0) ) { result = a1; return result; } // //return right border // if( (double)(t)>=(double)(b0) ) { result = b1; return result; } // //return value between left and right borders // result = (b1-a1)*(t-a0)/(b0-a0)+a1; return result; } } public class parametric { /************************************************************************* Parametric spline inteprolant: 2-dimensional curve. You should not try to access its members directly - use PSpline2XXXXXXXX() functions instead. *************************************************************************/ public class pspline2interpolant : apobject { public int n; public bool periodic; public double[] p; public spline1d.spline1dinterpolant x; public spline1d.spline1dinterpolant y; public pspline2interpolant() { init(); } public override void init() { p = new double[0]; x = new spline1d.spline1dinterpolant(); y = new spline1d.spline1dinterpolant(); } public override alglib.apobject make_copy() { pspline2interpolant _result = new pspline2interpolant(); _result.n = n; _result.periodic = periodic; _result.p = (double[])p.Clone(); _result.x = (spline1d.spline1dinterpolant)x.make_copy(); _result.y = (spline1d.spline1dinterpolant)y.make_copy(); return _result; } }; /************************************************************************* Parametric spline inteprolant: 3-dimensional curve. You should not try to access its members directly - use PSpline3XXXXXXXX() functions instead. *************************************************************************/ public class pspline3interpolant : apobject { public int n; public bool periodic; public double[] p; public spline1d.spline1dinterpolant x; public spline1d.spline1dinterpolant y; public spline1d.spline1dinterpolant z; public pspline3interpolant() { init(); } public override void init() { p = new double[0]; x = new spline1d.spline1dinterpolant(); y = new spline1d.spline1dinterpolant(); z = new spline1d.spline1dinterpolant(); } public override alglib.apobject make_copy() { pspline3interpolant _result = new pspline3interpolant(); _result.n = n; _result.periodic = periodic; _result.p = (double[])p.Clone(); _result.x = (spline1d.spline1dinterpolant)x.make_copy(); _result.y = (spline1d.spline1dinterpolant)y.make_copy(); _result.z = (spline1d.spline1dinterpolant)z.make_copy(); return _result; } }; /************************************************************************* This function builds non-periodic 2-dimensional parametric spline which starts at (X[0],Y[0]) and ends at (X[N-1],Y[N-1]). INPUT PARAMETERS: XY - points, array[0..N-1,0..1]. XY[I,0:1] corresponds to the Ith point. Order of points is important! N - points count, N>=5 for Akima splines, N>=2 for other types of splines. 
ST - spline type: * 0 Akima spline * 1 parabolically terminated Catmull-Rom spline (Tension=0) * 2 parabolically terminated cubic spline PT - parameterization type: * 0 uniform * 1 chord length * 2 centripetal OUTPUT PARAMETERS: P - parametric spline interpolant NOTES: * this function assumes that there all consequent points are distinct. I.e. (x0,y0)<>(x1,y1), (x1,y1)<>(x2,y2), (x2,y2)<>(x3,y3) and so on. However, non-consequent points may coincide, i.e. we can have (x0,y0)= =(x2,y2). -- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline2build(double[,] xy, int n, int st, int pt, pspline2interpolant p, alglib.xparams _params) { double[] tmp = new double[0]; int i_ = 0; xy = (double[,])xy.Clone(); alglib.ap.assert(st>=0 && st<=2, "PSpline2Build: incorrect spline type!"); alglib.ap.assert(pt>=0 && pt<=2, "PSpline2Build: incorrect parameterization type!"); if( st==0 ) { alglib.ap.assert(n>=5, "PSpline2Build: N<5 (minimum value for Akima splines)!"); } else { alglib.ap.assert(n>=2, "PSpline2Build: N<2!"); } // // Prepare // p.n = n; p.periodic = false; tmp = new double[n]; // // Build parameterization, check that all parameters are distinct // pspline2par(xy, n, pt, ref p.p, _params); alglib.ap.assert(apserv.aredistinct(p.p, n, _params), "PSpline2Build: consequent points are too close!"); // // Build splines // if( st==0 ) { for(i_=0; i_<=n-1;i_++) { tmp[i_] = xy[i_,0]; } spline1d.spline1dbuildakima(p.p, tmp, n, p.x, _params); for(i_=0; i_<=n-1;i_++) { tmp[i_] = xy[i_,1]; } spline1d.spline1dbuildakima(p.p, tmp, n, p.y, _params); } if( st==1 ) { for(i_=0; i_<=n-1;i_++) { tmp[i_] = xy[i_,0]; } spline1d.spline1dbuildcatmullrom(p.p, tmp, n, 0, 0.0, p.x, _params); for(i_=0; i_<=n-1;i_++) { tmp[i_] = xy[i_,1]; } spline1d.spline1dbuildcatmullrom(p.p, tmp, n, 0, 0.0, p.y, _params); } if( st==2 ) { for(i_=0; i_<=n-1;i_++) { tmp[i_] = xy[i_,0]; } spline1d.spline1dbuildcubic(p.p, tmp, n, 0, 0.0, 0, 0.0, p.x, _params); for(i_=0; i_<=n-1;i_++) { tmp[i_] = xy[i_,1]; } spline1d.spline1dbuildcubic(p.p, tmp, n, 0, 0.0, 0, 0.0, p.y, _params); } } /************************************************************************* This function builds non-periodic 3-dimensional parametric spline which starts at (X[0],Y[0],Z[0]) and ends at (X[N-1],Y[N-1],Z[N-1]). Same as PSpline2Build() function, but for 3D, so we won't duplicate its description here. 
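EXAMPLE (a minimal sketch, assuming the standard public wrappers alglib.pspline2build() and alglib.pspline2calc(), which are not shown in this fragment). A 2D curve is fitted through ordered points and evaluated via the scalar parameter T in [0,1]; the 3D version is used in exactly the same way, with one extra coordinate.

    double[,] xy = new double[,]{{0,0},{1,1},{2,0},{3,-1},{4,0}};  // ordered points, order matters
    alglib.pspline2interpolant p;
    alglib.pspline2build(xy, 5, 1, 1, out p);    // ST=1: Catmull-Rom, PT=1: chord length (wrapper assumed)

    double x, y;
    alglib.pspline2calc(p, 0.0, out x, out y);   // T=0   -> first point (0,0)
    alglib.pspline2calc(p, 1.0, out x, out y);   // T=1   -> last point  (4,0)
    alglib.pspline2calc(p, 0.5, out x, out y);   // T=0.5 -> some interior point of the curve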
-- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline3build(double[,] xy, int n, int st, int pt, pspline3interpolant p, alglib.xparams _params) { double[] tmp = new double[0]; int i_ = 0; xy = (double[,])xy.Clone(); alglib.ap.assert(st>=0 && st<=2, "PSpline3Build: incorrect spline type!"); alglib.ap.assert(pt>=0 && pt<=2, "PSpline3Build: incorrect parameterization type!"); if( st==0 ) { alglib.ap.assert(n>=5, "PSpline3Build: N<5 (minimum value for Akima splines)!"); } else { alglib.ap.assert(n>=2, "PSpline3Build: N<2!"); } // // Prepare // p.n = n; p.periodic = false; tmp = new double[n]; // // Build parameterization, check that all parameters are distinct // pspline3par(xy, n, pt, ref p.p, _params); alglib.ap.assert(apserv.aredistinct(p.p, n, _params), "PSpline3Build: consequent points are too close!"); // // Build splines // if( st==0 ) { for(i_=0; i_<=n-1;i_++) { tmp[i_] = xy[i_,0]; } spline1d.spline1dbuildakima(p.p, tmp, n, p.x, _params); for(i_=0; i_<=n-1;i_++) { tmp[i_] = xy[i_,1]; } spline1d.spline1dbuildakima(p.p, tmp, n, p.y, _params); for(i_=0; i_<=n-1;i_++) { tmp[i_] = xy[i_,2]; } spline1d.spline1dbuildakima(p.p, tmp, n, p.z, _params); } if( st==1 ) { for(i_=0; i_<=n-1;i_++) { tmp[i_] = xy[i_,0]; } spline1d.spline1dbuildcatmullrom(p.p, tmp, n, 0, 0.0, p.x, _params); for(i_=0; i_<=n-1;i_++) { tmp[i_] = xy[i_,1]; } spline1d.spline1dbuildcatmullrom(p.p, tmp, n, 0, 0.0, p.y, _params); for(i_=0; i_<=n-1;i_++) { tmp[i_] = xy[i_,2]; } spline1d.spline1dbuildcatmullrom(p.p, tmp, n, 0, 0.0, p.z, _params); } if( st==2 ) { for(i_=0; i_<=n-1;i_++) { tmp[i_] = xy[i_,0]; } spline1d.spline1dbuildcubic(p.p, tmp, n, 0, 0.0, 0, 0.0, p.x, _params); for(i_=0; i_<=n-1;i_++) { tmp[i_] = xy[i_,1]; } spline1d.spline1dbuildcubic(p.p, tmp, n, 0, 0.0, 0, 0.0, p.y, _params); for(i_=0; i_<=n-1;i_++) { tmp[i_] = xy[i_,2]; } spline1d.spline1dbuildcubic(p.p, tmp, n, 0, 0.0, 0, 0.0, p.z, _params); } } /************************************************************************* This function builds periodic 2-dimensional parametric spline which starts at (X[0],Y[0]), goes through all points to (X[N-1],Y[N-1]) and then back to (X[0],Y[0]). INPUT PARAMETERS: XY - points, array[0..N-1,0..1]. XY[I,0:1] corresponds to the Ith point. XY[N-1,0:1] must be different from XY[0,0:1]. Order of points is important! N - points count, N>=3 for other types of splines. ST - spline type: * 1 Catmull-Rom spline (Tension=0) with cyclic boundary conditions * 2 cubic spline with cyclic boundary conditions PT - parameterization type: * 0 uniform * 1 chord length * 2 centripetal OUTPUT PARAMETERS: P - parametric spline interpolant NOTES: * this function assumes that there all consequent points are distinct. I.e. (x0,y0)<>(x1,y1), (x1,y1)<>(x2,y2), (x2,y2)<>(x3,y3) and so on. However, non-consequent points may coincide, i.e. we can have (x0,y0)= =(x2,y2). * last point of sequence is NOT equal to the first point. You shouldn't make curve "explicitly periodic" by making them equal. 
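EXAMPLE (a minimal sketch, assuming the standard public wrappers alglib.pspline2buildperiodic() and alglib.pspline2calc(), which are not shown in this fragment). The contour is closed automatically, so the last point differs from the first one, as required above, and the parameter is treated modulo 1.

    double[,] xy = new double[,]{{1,0},{0,1},{-1,0},{0,-1}};   // closed square-like loop
    alglib.pspline2interpolant p;
    alglib.pspline2buildperiodic(xy, 4, 2, 1, out p);          // ST=2: cyclic cubic, PT=1: chord length

    double xa, ya, xb, yb;
    alglib.pspline2calc(p, 0.25, out xa, out ya);
    alglib.pspline2calc(p, 1.25, out xb, out yb);              // same point: T is wrapped into [0,1)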
-- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline2buildperiodic(double[,] xy, int n, int st, int pt, pspline2interpolant p, alglib.xparams _params) { double[,] xyp = new double[0,0]; double[] tmp = new double[0]; int i_ = 0; xy = (double[,])xy.Clone(); alglib.ap.assert(st>=1 && st<=2, "PSpline2BuildPeriodic: incorrect spline type!"); alglib.ap.assert(pt>=0 && pt<=2, "PSpline2BuildPeriodic: incorrect parameterization type!"); alglib.ap.assert(n>=3, "PSpline2BuildPeriodic: N<3!"); // // Prepare // p.n = n; p.periodic = true; tmp = new double[n+1]; xyp = new double[n+1, 2]; for(i_=0; i_<=n-1;i_++) { xyp[i_,0] = xy[i_,0]; } for(i_=0; i_<=n-1;i_++) { xyp[i_,1] = xy[i_,1]; } for(i_=0; i_<=1;i_++) { xyp[n,i_] = xy[0,i_]; } // // Build parameterization, check that all parameters are distinct // pspline2par(xyp, n+1, pt, ref p.p, _params); alglib.ap.assert(apserv.aredistinct(p.p, n+1, _params), "PSpline2BuildPeriodic: consequent (or first and last) points are too close!"); // // Build splines // if( st==1 ) { for(i_=0; i_<=n;i_++) { tmp[i_] = xyp[i_,0]; } spline1d.spline1dbuildcatmullrom(p.p, tmp, n+1, -1, 0.0, p.x, _params); for(i_=0; i_<=n;i_++) { tmp[i_] = xyp[i_,1]; } spline1d.spline1dbuildcatmullrom(p.p, tmp, n+1, -1, 0.0, p.y, _params); } if( st==2 ) { for(i_=0; i_<=n;i_++) { tmp[i_] = xyp[i_,0]; } spline1d.spline1dbuildcubic(p.p, tmp, n+1, -1, 0.0, -1, 0.0, p.x, _params); for(i_=0; i_<=n;i_++) { tmp[i_] = xyp[i_,1]; } spline1d.spline1dbuildcubic(p.p, tmp, n+1, -1, 0.0, -1, 0.0, p.y, _params); } } /************************************************************************* This function builds periodic 3-dimensional parametric spline which starts at (X[0],Y[0],Z[0]), goes through all points to (X[N-1],Y[N-1],Z[N-1]) and then back to (X[0],Y[0],Z[0]). Same as PSpline2Build() function, but for 3D, so we won't duplicate its description here. 
-- ALGLIB PROJECT --
     Copyright 28.05.2010 by Bochkanov Sergey
*************************************************************************/
public static void pspline3buildperiodic(double[,] xy,
    int n,
    int st,
    int pt,
    pspline3interpolant p,
    alglib.xparams _params)
{
    double[,] xyp = new double[0,0];
    double[] tmp = new double[0];
    int i_ = 0;

    xy = (double[,])xy.Clone();

    alglib.ap.assert(st>=1 && st<=2, "PSpline3BuildPeriodic: incorrect spline type!");
    alglib.ap.assert(pt>=0 && pt<=2, "PSpline3BuildPeriodic: incorrect parameterization type!");
    alglib.ap.assert(n>=3, "PSpline3BuildPeriodic: N<3!");

    //
    // Prepare
    //
    p.n = n;
    p.periodic = true;
    tmp = new double[n+1];
    xyp = new double[n+1, 3];
    for(i_=0; i_<=n-1;i_++)
    {
        xyp[i_,0] = xy[i_,0];
    }
    for(i_=0; i_<=n-1;i_++)
    {
        xyp[i_,1] = xy[i_,1];
    }
    for(i_=0; i_<=n-1;i_++)
    {
        xyp[i_,2] = xy[i_,2];
    }
    for(i_=0; i_<=2;i_++)
    {
        xyp[n,i_] = xy[0,i_];
    }

    //
    // Build parameterization, check that all parameters are distinct
    //
    pspline3par(xyp, n+1, pt, ref p.p, _params);
    alglib.ap.assert(apserv.aredistinct(p.p, n+1, _params), "PSpline3BuildPeriodic: consequent (or first and last) points are too close!");

    //
    // Build splines
    //
    if( st==1 )
    {
        for(i_=0; i_<=n;i_++)
        {
            tmp[i_] = xyp[i_,0];
        }
        spline1d.spline1dbuildcatmullrom(p.p, tmp, n+1, -1, 0.0, p.x, _params);
        for(i_=0; i_<=n;i_++)
        {
            tmp[i_] = xyp[i_,1];
        }
        spline1d.spline1dbuildcatmullrom(p.p, tmp, n+1, -1, 0.0, p.y, _params);
        for(i_=0; i_<=n;i_++)
        {
            tmp[i_] = xyp[i_,2];
        }
        spline1d.spline1dbuildcatmullrom(p.p, tmp, n+1, -1, 0.0, p.z, _params);
    }
    if( st==2 )
    {
        for(i_=0; i_<=n;i_++)
        {
            tmp[i_] = xyp[i_,0];
        }
        spline1d.spline1dbuildcubic(p.p, tmp, n+1, -1, 0.0, -1, 0.0, p.x, _params);
        for(i_=0; i_<=n;i_++)
        {
            tmp[i_] = xyp[i_,1];
        }
        spline1d.spline1dbuildcubic(p.p, tmp, n+1, -1, 0.0, -1, 0.0, p.y, _params);
        for(i_=0; i_<=n;i_++)
        {
            tmp[i_] = xyp[i_,2];
        }
        spline1d.spline1dbuildcubic(p.p, tmp, n+1, -1, 0.0, -1, 0.0, p.z, _params);
    }
}


/*************************************************************************
This function returns vector of parameter values corresponding to points.

I.e. for P created from (X[0],Y[0])...(X[N-1],Y[N-1]) and U=TValues(P) we
have
    (X[0],Y[0]) = PSpline2Calc(P,U[0]),
    (X[1],Y[1]) = PSpline2Calc(P,U[1]),
    (X[2],Y[2]) = PSpline2Calc(P,U[2]),
    ...

INPUT PARAMETERS:
    P   -   parametric spline interpolant

OUTPUT PARAMETERS:
    N   -   array size
    T   -   array[0..N-1]

NOTES:
* for non-periodic splines U[0]=0, U[N-1]=1
* for periodic splines U[0]=0, U[N-1]<1

  -- ALGLIB PROJECT --
     Copyright 28.05.2010 by Bochkanov Sergey
*************************************************************************/
public static void pspline2parametervalues(pspline2interpolant p,
    ref int n,
    ref double[] t,
    alglib.xparams _params)
{
    int i_ = 0;

    n = 0;
    t = new double[0];

    alglib.ap.assert(p.n>=2, "PSpline2ParameterValues: internal error!");
    n = p.n;
    t = new double[n];
    for(i_=0; i_<=n-1;i_++)
    {
        t[i_] = p.p[i_];
    }
    t[0] = 0;
    if( !p.periodic )
    {
        t[n-1] = 1;
    }
}


/*************************************************************************
This function returns vector of parameter values corresponding to points.

Same as PSpline2ParameterValues(), but for 3D.
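EXAMPLE (a minimal sketch for the 2D version referenced above, assuming the standard public wrappers alglib.pspline2build(), alglib.pspline2parametervalues() and alglib.pspline2calc(), which are not shown in this fragment). Evaluating the spline at the returned parameter values reproduces the original points:

    double[,] xy = new double[,]{{0,0},{1,2},{3,1}};
    alglib.pspline2interpolant p;
    alglib.pspline2build(xy, 3, 2, 1, out p);          // wrapper assumed, see note above

    int n;
    double[] u;
    alglib.pspline2parametervalues(p, out n, out u);   // u[0]=0, u[n-1]=1 for this non-periodic spline

    double x, y;
    alglib.pspline2calc(p, u[1], out x, out y);        // (x,y) == (1,2) up to rounding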
-- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline3parametervalues(pspline3interpolant p, ref int n, ref double[] t, alglib.xparams _params) { int i_ = 0; n = 0; t = new double[0]; alglib.ap.assert(p.n>=2, "PSpline3ParameterValues: internal error!"); n = p.n; t = new double[n]; for(i_=0; i_<=n-1;i_++) { t[i_] = p.p[i_]; } t[0] = 0; if( !p.periodic ) { t[n-1] = 1; } } /************************************************************************* This function calculates the value of the parametric spline for a given value of parameter T INPUT PARAMETERS: P - parametric spline interpolant T - point: * T in [0,1] corresponds to interval spanned by points * for non-periodic splines T<0 (or T>1) correspond to parts of the curve before the first (after the last) point * for periodic splines T<0 (or T>1) are projected into [0,1] by making T=T-floor(T). OUTPUT PARAMETERS: X - X-position Y - Y-position -- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline2calc(pspline2interpolant p, double t, ref double x, ref double y, alglib.xparams _params) { x = 0; y = 0; if( p.periodic ) { t = t-(int)Math.Floor(t); } x = spline1d.spline1dcalc(p.x, t, _params); y = spline1d.spline1dcalc(p.y, t, _params); } /************************************************************************* This function calculates the value of the parametric spline for a given value of parameter T. INPUT PARAMETERS: P - parametric spline interpolant T - point: * T in [0,1] corresponds to interval spanned by points * for non-periodic splines T<0 (or T>1) correspond to parts of the curve before the first (after the last) point * for periodic splines T<0 (or T>1) are projected into [0,1] by making T=T-floor(T). OUTPUT PARAMETERS: X - X-position Y - Y-position Z - Z-position -- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline3calc(pspline3interpolant p, double t, ref double x, ref double y, ref double z, alglib.xparams _params) { x = 0; y = 0; z = 0; if( p.periodic ) { t = t-(int)Math.Floor(t); } x = spline1d.spline1dcalc(p.x, t, _params); y = spline1d.spline1dcalc(p.y, t, _params); z = spline1d.spline1dcalc(p.z, t, _params); } /************************************************************************* This function calculates tangent vector for a given value of parameter T INPUT PARAMETERS: P - parametric spline interpolant T - point: * T in [0,1] corresponds to interval spanned by points * for non-periodic splines T<0 (or T>1) correspond to parts of the curve before the first (after the last) point * for periodic splines T<0 (or T>1) are projected into [0,1] by making T=T-floor(T). OUTPUT PARAMETERS: X - X-component of tangent vector (normalized) Y - Y-component of tangent vector (normalized) NOTE: X^2+Y^2 is either 1 (for non-zero tangent vector) or 0. 
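EXAMPLE (a minimal sketch, assuming the standard public wrappers alglib.pspline2build(), alglib.pspline2tangent() and alglib.pspline2diff(), which are not shown in this fragment). The tangent is simply the derivative returned by PSpline2Diff(), normalized in an overflow-safe way:

    double[,] xy = new double[,]{{0,0},{1,1},{2,0}};
    alglib.pspline2interpolant p;
    alglib.pspline2build(xy, 3, 2, 1, out p);          // wrappers assumed, see note above

    double tx, ty, x, dx, y, dy;
    alglib.pspline2tangent(p, 0.5, out tx, out ty);    // unit tangent, or (0,0) for a zero derivative
    alglib.pspline2diff(p, 0.5, out x, out dx, out y, out dy);
    // up to rounding: tx == dx/sqrt(dx*dx+dy*dy), ty == dy/sqrt(dx*dx+dy*dy)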
-- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline2tangent(pspline2interpolant p, double t, ref double x, ref double y, alglib.xparams _params) { double v = 0; double v0 = 0; double v1 = 0; x = 0; y = 0; if( p.periodic ) { t = t-(int)Math.Floor(t); } pspline2diff(p, t, ref v0, ref x, ref v1, ref y, _params); if( (double)(x)!=(double)(0) || (double)(y)!=(double)(0) ) { // // this code is a bit more complex than X^2+Y^2 to avoid // overflow for large values of X and Y. // v = apserv.safepythag2(x, y, _params); x = x/v; y = y/v; } } /************************************************************************* This function calculates tangent vector for a given value of parameter T INPUT PARAMETERS: P - parametric spline interpolant T - point: * T in [0,1] corresponds to interval spanned by points * for non-periodic splines T<0 (or T>1) correspond to parts of the curve before the first (after the last) point * for periodic splines T<0 (or T>1) are projected into [0,1] by making T=T-floor(T). OUTPUT PARAMETERS: X - X-component of tangent vector (normalized) Y - Y-component of tangent vector (normalized) Z - Z-component of tangent vector (normalized) NOTE: X^2+Y^2+Z^2 is either 1 (for non-zero tangent vector) or 0. -- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline3tangent(pspline3interpolant p, double t, ref double x, ref double y, ref double z, alglib.xparams _params) { double v = 0; double v0 = 0; double v1 = 0; double v2 = 0; x = 0; y = 0; z = 0; if( p.periodic ) { t = t-(int)Math.Floor(t); } pspline3diff(p, t, ref v0, ref x, ref v1, ref y, ref v2, ref z, _params); if( ((double)(x)!=(double)(0) || (double)(y)!=(double)(0)) || (double)(z)!=(double)(0) ) { v = apserv.safepythag3(x, y, z, _params); x = x/v; y = y/v; z = z/v; } } /************************************************************************* This function calculates derivative, i.e. it returns (dX/dT,dY/dT). INPUT PARAMETERS: P - parametric spline interpolant T - point: * T in [0,1] corresponds to interval spanned by points * for non-periodic splines T<0 (or T>1) correspond to parts of the curve before the first (after the last) point * for periodic splines T<0 (or T>1) are projected into [0,1] by making T=T-floor(T). OUTPUT PARAMETERS: X - X-value DX - X-derivative Y - Y-value DY - Y-derivative -- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline2diff(pspline2interpolant p, double t, ref double x, ref double dx, ref double y, ref double dy, alglib.xparams _params) { double d2s = 0; x = 0; dx = 0; y = 0; dy = 0; if( p.periodic ) { t = t-(int)Math.Floor(t); } spline1d.spline1ddiff(p.x, t, ref x, ref dx, ref d2s, _params); spline1d.spline1ddiff(p.y, t, ref y, ref dy, ref d2s, _params); } /************************************************************************* This function calculates derivative, i.e. it returns (dX/dT,dY/dT,dZ/dT). INPUT PARAMETERS: P - parametric spline interpolant T - point: * T in [0,1] corresponds to interval spanned by points * for non-periodic splines T<0 (or T>1) correspond to parts of the curve before the first (after the last) point * for periodic splines T<0 (or T>1) are projected into [0,1] by making T=T-floor(T). 
OUTPUT PARAMETERS: X - X-value DX - X-derivative Y - Y-value DY - Y-derivative Z - Z-value DZ - Z-derivative -- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline3diff(pspline3interpolant p, double t, ref double x, ref double dx, ref double y, ref double dy, ref double z, ref double dz, alglib.xparams _params) { double d2s = 0; x = 0; dx = 0; y = 0; dy = 0; z = 0; dz = 0; if( p.periodic ) { t = t-(int)Math.Floor(t); } spline1d.spline1ddiff(p.x, t, ref x, ref dx, ref d2s, _params); spline1d.spline1ddiff(p.y, t, ref y, ref dy, ref d2s, _params); spline1d.spline1ddiff(p.z, t, ref z, ref dz, ref d2s, _params); } /************************************************************************* This function calculates first and second derivative with respect to T. INPUT PARAMETERS: P - parametric spline interpolant T - point: * T in [0,1] corresponds to interval spanned by points * for non-periodic splines T<0 (or T>1) correspond to parts of the curve before the first (after the last) point * for periodic splines T<0 (or T>1) are projected into [0,1] by making T=T-floor(T). OUTPUT PARAMETERS: X - X-value DX - derivative D2X - second derivative Y - Y-value DY - derivative D2Y - second derivative -- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline2diff2(pspline2interpolant p, double t, ref double x, ref double dx, ref double d2x, ref double y, ref double dy, ref double d2y, alglib.xparams _params) { x = 0; dx = 0; d2x = 0; y = 0; dy = 0; d2y = 0; if( p.periodic ) { t = t-(int)Math.Floor(t); } spline1d.spline1ddiff(p.x, t, ref x, ref dx, ref d2x, _params); spline1d.spline1ddiff(p.y, t, ref y, ref dy, ref d2y, _params); } /************************************************************************* This function calculates first and second derivative with respect to T. INPUT PARAMETERS: P - parametric spline interpolant T - point: * T in [0,1] corresponds to interval spanned by points * for non-periodic splines T<0 (or T>1) correspond to parts of the curve before the first (after the last) point * for periodic splines T<0 (or T>1) are projected into [0,1] by making T=T-floor(T). OUTPUT PARAMETERS: X - X-value DX - derivative D2X - second derivative Y - Y-value DY - derivative D2Y - second derivative Z - Z-value DZ - derivative D2Z - second derivative -- ALGLIB PROJECT -- Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ public static void pspline3diff2(pspline3interpolant p, double t, ref double x, ref double dx, ref double d2x, ref double y, ref double dy, ref double d2y, ref double z, ref double dz, ref double d2z, alglib.xparams _params) { x = 0; dx = 0; d2x = 0; y = 0; dy = 0; d2y = 0; z = 0; dz = 0; d2z = 0; if( p.periodic ) { t = t-(int)Math.Floor(t); } spline1d.spline1ddiff(p.x, t, ref x, ref dx, ref d2x, _params); spline1d.spline1ddiff(p.y, t, ref y, ref dy, ref d2y, _params); spline1d.spline1ddiff(p.z, t, ref z, ref dz, ref d2z, _params); } /************************************************************************* This function calculates arc length, i.e. length of curve between t=a and t=b. 
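EXAMPLE (a minimal sketch, assuming the standard public wrappers alglib.pspline2build() and alglib.pspline2arclength(), which are not shown in this fragment). For collinear points the curve is a straight segment, so the result is easy to verify by hand:

    double[,] xy = new double[,]{{0,0},{1.5,2},{3,4}};      // points on a straight segment of length 5
    alglib.pspline2interpolant p;
    alglib.pspline2build(xy, 3, 1, 1, out p);               // Catmull-Rom, chord-length parameterization

    double len  = alglib.pspline2arclength(p, 0.0, 1.0);    // ~5.0
    double half = alglib.pspline2arclength(p, 0.0, 0.5);    // ~2.5; swapping A and B changes the sign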
INPUT PARAMETERS: P - parametric spline interpolant A,B - parameter values corresponding to arc ends: * B>A will result in positive length returned * B0, "PSpline2ArcLength: internal error!"); return result; } /************************************************************************* This function calculates arc length, i.e. length of curve between t=a and t=b. INPUT PARAMETERS: P - parametric spline interpolant A,B - parameter values corresponding to arc ends: * B>A will result in positive length returned * B0, "PSpline3ArcLength: internal error!"); return result; } /************************************************************************* This subroutine fits piecewise linear curve to points with Ramer-Douglas- Peucker algorithm. This function performs PARAMETRIC fit, i.e. it can be used to fit curves like circles. On input it accepts dataset which describes parametric multidimensional curve X(t), with X being vector, and t taking values in [0,N), where N is a number of points in dataset. As result, it returns reduced dataset X2, which can be used to build parametric curve X2(t), which approximates X(t) with desired precision (or has specified number of sections). INPUT PARAMETERS: X - array of multidimensional points: * at least N elements, leading N elements are used if more than N elements were specified * order of points is IMPORTANT because it is parametric fit * each row of array is one point which has D coordinates N - number of elements in X D - number of dimensions (elements per row of X) StopM - stopping condition - desired number of sections: * at most M sections are generated by this function * less than M sections can be generated if we have N=0, "LSTFitPiecewiseLinearParametricRDP: N<0"); alglib.ap.assert(d>=1, "LSTFitPiecewiseLinearParametricRDP: D<=0"); alglib.ap.assert(stopm>=0, "LSTFitPiecewiseLinearParametricRDP: StopM<1"); alglib.ap.assert(math.isfinite(stopeps) && (double)(stopeps)>=(double)(0), "LSTFitPiecewiseLinearParametricRDP: StopEps<0 or is infinite"); alglib.ap.assert(alglib.ap.rows(x)>=n, "LSTFitPiecewiseLinearParametricRDP: Rows(X)=d, "LSTFitPiecewiseLinearParametricRDP: Cols(X)(double)(0) && (double)(heaperrors[0])<=(double)(stopeps) ) { break; } if( stopm>0 && nsections>=stopm ) { break; } k = heaptags[0]; // // K-th section is divided in two: // * first one spans interval from X[Sections[K,0]] to X[Sections[K,2]] // * second one spans interval from X[Sections[K,2]] to X[Sections[K,1]] // // First section is stored at K-th position, second one is appended to the table. 
// Then we update heap which stores pairs of (error,section_index) // k0 = (int)Math.Round(sections[k,0]); k1 = (int)Math.Round(sections[k,1]); k2 = (int)Math.Round(sections[k,2]); rdpanalyzesectionpar(x, k0, k2, d, ref idx0, ref e0, _params); rdpanalyzesectionpar(x, k2, k1, d, ref idx1, ref e1, _params); sections[k,0] = k0; sections[k,1] = k2; sections[k,2] = idx0; sections[k,3] = e0; tsort.tagheapreplacetopi(ref heaperrors, ref heaptags, nsections, e0, k, _params); sections[nsections,0] = k2; sections[nsections,1] = k1; sections[nsections,2] = idx1; sections[nsections,3] = e1; tsort.tagheappushi(ref heaperrors, ref heaptags, ref nsections, e1, nsections, _params); } // // Convert from sections to indexes // buf0 = new double[nsections+1]; for(i=0; i<=nsections-1; i++) { buf0[i] = (int)Math.Round(sections[i,0]); } buf0[nsections] = n-1; tsort.tagsortfast(ref buf0, ref buf1, nsections+1, _params); idx2 = new int[nsections+1]; for(i=0; i<=nsections; i++) { idx2[i] = (int)Math.Round(buf0[i]); } alglib.ap.assert(idx2[0]==0, "RDP algorithm: integrity check failed"); alglib.ap.assert(idx2[nsections]==n-1, "RDP algorithm: integrity check failed"); // // Output sections: // * first NSection elements of X2/Y2 are filled by x/y at left boundaries of sections // * last element of X2/Y2 is filled by right boundary of rightmost section // * X2/Y2 is sorted by ascending of X2 // x2 = new double[nsections+1, d]; for(i=0; i<=nsections; i++) { for(j=0; j<=d-1; j++) { x2[i,j] = x[idx2[i],j]; } } } /************************************************************************* Builds non-periodic parameterization for 2-dimensional spline *************************************************************************/ private static void pspline2par(double[,] xy, int n, int pt, ref double[] p, alglib.xparams _params) { double v = 0; int i = 0; int i_ = 0; p = new double[0]; alglib.ap.assert(pt>=0 && pt<=2, "PSpline2Par: internal error!"); // // Build parameterization: // * fill by non-normalized values // * normalize them so we have P[0]=0, P[N-1]=1. // p = new double[n]; if( pt==0 ) { for(i=0; i<=n-1; i++) { p[i] = i; } } if( pt==1 ) { p[0] = 0; for(i=1; i<=n-1; i++) { p[i] = p[i-1]+apserv.safepythag2(xy[i,0]-xy[i-1,0], xy[i,1]-xy[i-1,1], _params); } } if( pt==2 ) { p[0] = 0; for(i=1; i<=n-1; i++) { p[i] = p[i-1]+Math.Sqrt(apserv.safepythag2(xy[i,0]-xy[i-1,0], xy[i,1]-xy[i-1,1], _params)); } } v = 1/p[n-1]; for(i_=0; i_<=n-1;i_++) { p[i_] = v*p[i_]; } } /************************************************************************* Builds non-periodic parameterization for 3-dimensional spline *************************************************************************/ private static void pspline3par(double[,] xy, int n, int pt, ref double[] p, alglib.xparams _params) { double v = 0; int i = 0; int i_ = 0; p = new double[0]; alglib.ap.assert(pt>=0 && pt<=2, "PSpline3Par: internal error!"); // // Build parameterization: // * fill by non-normalized values // * normalize them so we have P[0]=0, P[N-1]=1. 
// p = new double[n]; if( pt==0 ) { for(i=0; i<=n-1; i++) { p[i] = i; } } if( pt==1 ) { p[0] = 0; for(i=1; i<=n-1; i++) { p[i] = p[i-1]+apserv.safepythag3(xy[i,0]-xy[i-1,0], xy[i,1]-xy[i-1,1], xy[i,2]-xy[i-1,2], _params); } } if( pt==2 ) { p[0] = 0; for(i=1; i<=n-1; i++) { p[i] = p[i-1]+Math.Sqrt(apserv.safepythag3(xy[i,0]-xy[i-1,0], xy[i,1]-xy[i-1,1], xy[i,2]-xy[i-1,2], _params)); } } v = 1/p[n-1]; for(i_=0; i_<=n-1;i_++) { p[i_] = v*p[i_]; } } /************************************************************************* This function analyzes section of curve for processing by RDP algorithm: given set of points X,Y with indexes [I0,I1] it returns point with worst deviation from linear model (PARAMETRIC version which sees curve as X(t) with vector X). Input parameters: XY - array I0,I1 - interval (boundaries included) to process D - number of dimensions OUTPUT PARAMETERS: WorstIdx - index of worst point WorstError - error at worst point NOTE: this function guarantees that it returns exactly zero for a section with less than 3 points. -- ALGLIB PROJECT -- Copyright 02.10.2014 by Bochkanov Sergey *************************************************************************/ private static void rdpanalyzesectionpar(double[,] xy, int i0, int i1, int d, ref int worstidx, ref double worsterror, alglib.xparams _params) { int i = 0; int j = 0; double v = 0; double d2 = 0; double ts = 0; double vv = 0; worstidx = 0; worsterror = 0; // // Quick exit for 0, 1, 2 points // if( i1-i0+1<3 ) { worstidx = i0; worsterror = 0.0; return; } // // Estimate D2 - squared distance between XY[I1] and XY[I0]. // In case D2=0 handle it as special case. // d2 = 0.0; for(j=0; j<=d-1; j++) { d2 = d2+math.sqr(xy[i1,j]-xy[i0,j]); } if( (double)(d2)==(double)(0) ) { // // First and last points are equal, interval evaluation is // trivial - we just calculate distance from all points to // the first/last one. // worstidx = i0; worsterror = 0.0; for(i=i0+1; i<=i1-1; i++) { vv = 0.0; for(j=0; j<=d-1; j++) { v = xy[i,j]-xy[i0,j]; vv = vv+v*v; } vv = Math.Sqrt(vv); if( (double)(vv)>(double)(worsterror) ) { worsterror = vv; worstidx = i; } } return; } // // General case // // Current section of curve is modeled as x(t) = d*t+c, where // d = XY[I1]-XY[I0] // c = XY[I0] // t is in [0,1] // worstidx = i0; worsterror = 0.0; for(i=i0+1; i<=i1-1; i++) { // // Determine t_s - parameter value for projected point. 
//
ts = (double)(i-i0)/(double)(i1-i0);

//
// Estimate error norm
//
vv = 0.0;
for(j=0; j<=d-1; j++)
{
    v = (xy[i1,j]-xy[i0,j])*ts-(xy[i,j]-xy[i0,j]);
    vv = vv+math.sqr(v);
}
vv = Math.Sqrt(vv);
if( (double)(vv)>(double)(worsterror) )
{
    worsterror = vv;
    worstidx = i;
}
}
}
}


public class spline3d
{
    /*************************************************************************
    3-dimensional spline interpolant
    *************************************************************************/
    public class spline3dinterpolant : apobject
    {
        public int k;
        public int stype;
        public int n;
        public int m;
        public int l;
        public int d;
        public double[] x;
        public double[] y;
        public double[] z;
        public double[] f;
        public spline3dinterpolant()
        {
            init();
        }
        public override void init()
        {
            x = new double[0];
            y = new double[0];
            z = new double[0];
            f = new double[0];
        }
        public override alglib.apobject make_copy()
        {
            spline3dinterpolant _result = new spline3dinterpolant();
            _result.k = k;
            _result.stype = stype;
            _result.n = n;
            _result.m = m;
            _result.l = l;
            _result.d = d;
            _result.x = (double[])x.Clone();
            _result.y = (double[])y.Clone();
            _result.z = (double[])z.Clone();
            _result.f = (double[])f.Clone();
            return _result;
        }
    };


    /*************************************************************************
    This subroutine calculates the value of the trilinear or tricubic spline
    at the given point (X,Y,Z).

    INPUT PARAMETERS:
        C       -   coefficients table built by Spline3DBuildTrilinearV (or
                    another spline3d construction function)
        X, Y, Z -   point

    Result:
        S(x,y,z)

      -- ALGLIB PROJECT --
         Copyright 26.04.2012 by Bochkanov Sergey
    *************************************************************************/
    public static double spline3dcalc(spline3dinterpolant c, double x, double y, double z, alglib.xparams _params)
    {
        double result = 0;
        double v = 0;
        double vx = 0;
        double vy = 0;
        double vxy = 0;

        alglib.ap.assert(c.stype==-1 || c.stype==-3, "Spline3DCalc: incorrect C (incorrect parameter C.SType)");
        alglib.ap.assert((math.isfinite(x) && math.isfinite(y)) && math.isfinite(z), "Spline3DCalc: X=NaN/Infinite, Y=NaN/Infinite or Z=NaN/Infinite");
        if( c.d!=1 )
        {
            result = 0;
            return result;
        }
        spline3ddiff(c, x, y, z, ref v, ref vx, ref vy, ref vxy, _params);
        result = v;
        return result;
    }


    /*************************************************************************
    This subroutine performs linear transformation of the spline argument.
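EXAMPLE (a minimal sketch, assuming the standard public wrappers alglib.spline3dbuildtrilinearv(), alglib.spline3dlintransxyz() and alglib.spline3dcalc(), which are not shown in this fragment). With the coefficients below the transformed spline S2 satisfies S2(u,v,w) = S(2*u, 2*v, 2*w), so the value previously found at (0.5,0.5,0.5) is now found at (0.25,0.25,0.25):

    double[] gx = new double[]{0, 1}, gy = new double[]{0, 1}, gz = new double[]{0, 1};
    double[] f  = new double[]{0, 1, 2, 3, 4, 5, 6, 7};                // scalar grid values, D=1, X index fastest
    alglib.spline3dinterpolant s;
    alglib.spline3dbuildtrilinearv(gx, 2, gy, 2, gz, 2, f, 1, out s);  // wrappers assumed, see note above

    double before = alglib.spline3dcalc(s, 0.5, 0.5, 0.5);
    alglib.spline3dlintransxyz(s, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0);       // x = 2*u, y = 2*v, z = 2*w
    double after  = alglib.spline3dcalc(s, 0.25, 0.25, 0.25);          // equals 'before'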
INPUT PARAMETERS: C - spline interpolant AX, BX - transformation coefficients: x = A*u + B AY, BY - transformation coefficients: y = A*v + B AZ, BZ - transformation coefficients: z = A*w + B OUTPUT PARAMETERS: C - transformed spline -- ALGLIB PROJECT -- Copyright 26.04.2012 by Bochkanov Sergey *************************************************************************/ public static void spline3dlintransxyz(spline3dinterpolant c, double ax, double bx, double ay, double by, double az, double bz, alglib.xparams _params) { double[] x = new double[0]; double[] y = new double[0]; double[] z = new double[0]; double[] f = new double[0]; double[] v = new double[0]; int i = 0; int j = 0; int k = 0; int di = 0; int i_ = 0; alglib.ap.assert(c.stype==-3 || c.stype==-1, "Spline3DLinTransXYZ: incorrect C (incorrect parameter C.SType)"); x = new double[c.n]; y = new double[c.m]; z = new double[c.l]; f = new double[c.m*c.n*c.l*c.d]; for(j=0; j<=c.n-1; j++) { x[j] = c.x[j]; } for(i=0; i<=c.m-1; i++) { y[i] = c.y[i]; } for(i=0; i<=c.l-1; i++) { z[i] = c.z[i]; } // // Handle different combinations of zero/nonzero AX/AY/AZ // if( ((double)(ax)!=(double)(0) && (double)(ay)!=(double)(0)) && (double)(az)!=(double)(0) ) { for(i_=0; i_<=c.m*c.n*c.l*c.d-1;i_++) { f[i_] = c.f[i_]; } } if( ((double)(ax)==(double)(0) && (double)(ay)!=(double)(0)) && (double)(az)!=(double)(0) ) { for(i=0; i<=c.m-1; i++) { for(j=0; j<=c.l-1; j++) { spline3dcalcv(c, bx, y[i], z[j], ref v, _params); for(k=0; k<=c.n-1; k++) { for(di=0; di<=c.d-1; di++) { f[c.d*(c.n*(c.m*j+i)+k)+di] = v[di]; } } } } ax = 1; bx = 0; } if( ((double)(ax)!=(double)(0) && (double)(ay)==(double)(0)) && (double)(az)!=(double)(0) ) { for(i=0; i<=c.n-1; i++) { for(j=0; j<=c.l-1; j++) { spline3dcalcv(c, x[i], by, z[j], ref v, _params); for(k=0; k<=c.m-1; k++) { for(di=0; di<=c.d-1; di++) { f[c.d*(c.n*(c.m*j+k)+i)+di] = v[di]; } } } } ay = 1; by = 0; } if( ((double)(ax)!=(double)(0) && (double)(ay)!=(double)(0)) && (double)(az)==(double)(0) ) { for(i=0; i<=c.n-1; i++) { for(j=0; j<=c.m-1; j++) { spline3dcalcv(c, x[i], y[j], bz, ref v, _params); for(k=0; k<=c.l-1; k++) { for(di=0; di<=c.d-1; di++) { f[c.d*(c.n*(c.m*k+j)+i)+di] = v[di]; } } } } az = 1; bz = 0; } if( ((double)(ax)==(double)(0) && (double)(ay)==(double)(0)) && (double)(az)!=(double)(0) ) { for(i=0; i<=c.l-1; i++) { spline3dcalcv(c, bx, by, z[i], ref v, _params); for(k=0; k<=c.m-1; k++) { for(j=0; j<=c.n-1; j++) { for(di=0; di<=c.d-1; di++) { f[c.d*(c.n*(c.m*i+k)+j)+di] = v[di]; } } } } ax = 1; bx = 0; ay = 1; by = 0; } if( ((double)(ax)==(double)(0) && (double)(ay)!=(double)(0)) && (double)(az)==(double)(0) ) { for(i=0; i<=c.m-1; i++) { spline3dcalcv(c, bx, y[i], bz, ref v, _params); for(k=0; k<=c.l-1; k++) { for(j=0; j<=c.n-1; j++) { for(di=0; di<=c.d-1; di++) { f[c.d*(c.n*(c.m*k+i)+j)+di] = v[di]; } } } } ax = 1; bx = 0; az = 1; bz = 0; } if( ((double)(ax)!=(double)(0) && (double)(ay)==(double)(0)) && (double)(az)==(double)(0) ) { for(i=0; i<=c.n-1; i++) { spline3dcalcv(c, x[i], by, bz, ref v, _params); for(k=0; k<=c.l-1; k++) { for(j=0; j<=c.m-1; j++) { for(di=0; di<=c.d-1; di++) { f[c.d*(c.n*(c.m*k+j)+i)+di] = v[di]; } } } } ay = 1; by = 0; az = 1; bz = 0; } if( ((double)(ax)==(double)(0) && (double)(ay)==(double)(0)) && (double)(az)==(double)(0) ) { spline3dcalcv(c, bx, by, bz, ref v, _params); for(k=0; k<=c.l-1; k++) { for(j=0; j<=c.m-1; j++) { for(i=0; i<=c.n-1; i++) { for(di=0; di<=c.d-1; di++) { f[c.d*(c.n*(c.m*k+j)+i)+di] = v[di]; } } } } ax = 1; bx = 0; ay = 1; by = 0; az = 1; bz = 0; } // // 
General case: AX<>0, AY<>0, AZ<>0 // Unpack, scale and pack again. // for(i=0; i<=c.n-1; i++) { x[i] = (x[i]-bx)/ax; } for(i=0; i<=c.m-1; i++) { y[i] = (y[i]-by)/ay; } for(i=0; i<=c.l-1; i++) { z[i] = (z[i]-bz)/az; } if( c.stype==-1 ) { spline3dbuildtrilinearv(x, c.n, y, c.m, z, c.l, f, c.d, c, _params); } } /************************************************************************* This subroutine performs linear transformation of the spline. INPUT PARAMETERS: C - spline interpolant. A, B- transformation coefficients: S2(x,y) = A*S(x,y,z) + B OUTPUT PARAMETERS: C - transformed spline -- ALGLIB PROJECT -- Copyright 26.04.2012 by Bochkanov Sergey *************************************************************************/ public static void spline3dlintransf(spline3dinterpolant c, double a, double b, alglib.xparams _params) { double[] x = new double[0]; double[] y = new double[0]; double[] z = new double[0]; double[] f = new double[0]; int i = 0; int j = 0; alglib.ap.assert(c.stype==-3 || c.stype==-1, "Spline3DLinTransF: incorrect C (incorrect parameter C.SType)"); x = new double[c.n]; y = new double[c.m]; z = new double[c.l]; f = new double[c.m*c.n*c.l*c.d]; for(j=0; j<=c.n-1; j++) { x[j] = c.x[j]; } for(i=0; i<=c.m-1; i++) { y[i] = c.y[i]; } for(i=0; i<=c.l-1; i++) { z[i] = c.z[i]; } for(i=0; i<=c.m*c.n*c.l*c.d-1; i++) { f[i] = a*c.f[i]+b; } if( c.stype==-1 ) { spline3dbuildtrilinearv(x, c.n, y, c.m, z, c.l, f, c.d, c, _params); } } /************************************************************************* This subroutine makes the copy of the spline model. INPUT PARAMETERS: C - spline interpolant OUTPUT PARAMETERS: CC - spline copy -- ALGLIB PROJECT -- Copyright 26.04.2012 by Bochkanov Sergey *************************************************************************/ public static void spline3dcopy(spline3dinterpolant c, spline3dinterpolant cc, alglib.xparams _params) { int tblsize = 0; int i_ = 0; alglib.ap.assert(c.k==1 || c.k==3, "Spline3DCopy: incorrect C (incorrect parameter C.K)"); cc.k = c.k; cc.n = c.n; cc.m = c.m; cc.l = c.l; cc.d = c.d; tblsize = c.n*c.m*c.l*c.d; cc.stype = c.stype; cc.x = new double[cc.n]; cc.y = new double[cc.m]; cc.z = new double[cc.l]; cc.f = new double[tblsize]; for(i_=0; i_<=cc.n-1;i_++) { cc.x[i_] = c.x[i_]; } for(i_=0; i_<=cc.m-1;i_++) { cc.y[i_] = c.y[i_]; } for(i_=0; i_<=cc.l-1;i_++) { cc.z[i_] = c.z[i_]; } for(i_=0; i_<=tblsize-1;i_++) { cc.f[i_] = c.f[i_]; } } /************************************************************************* Trilinear spline resampling INPUT PARAMETERS: A - array[0..OldXCount*OldYCount*OldZCount-1], function values at the old grid, : A[0] x=0,y=0,z=0 A[1] x=1,y=0,z=0 A[..] ... A[..] x=oldxcount-1,y=0,z=0 A[..] x=0,y=1,z=0 A[..] ... ... OldZCount - old Z-count, OldZCount>1 OldYCount - old Y-count, OldYCount>1 OldXCount - old X-count, OldXCount>1 NewZCount - new Z-count, NewZCount>1 NewYCount - new Y-count, NewYCount>1 NewXCount - new X-count, NewXCount>1 OUTPUT PARAMETERS: B - array[0..NewXCount*NewYCount*NewZCount-1], function values at the new grid: B[0] x=0,y=0,z=0 B[1] x=1,y=0,z=0 B[..] ... B[..] x=newxcount-1,y=0,z=0 B[..] x=0,y=1,z=0 B[..] ... ... 
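EXAMPLE (informal sketch, not part of the normative description of this
routine; it calls the routine exactly as it is declared below and passes
null for the trailing XParams argument):

    // resample a 2x2x2 grid, whose stored value equals the x-index,
    // onto a 3x3x3 grid; new interior nodes are trilinear averages
    double[] a = new double[8];
    for(int iz=0; iz<2; iz++)
        for(int iy=0; iy<2; iy++)
            for(int ix=0; ix<2; ix++)
                a[2*(2*iz+iy)+ix] = ix;            // f(x,y,z) = x-index
    double[] b = new double[0];
    spline3d.spline3dresampletrilinear(a, 2, 2, 2, 3, 3, 3, ref b, null);
    // b[3*(3*k+j)+i] is expected to be 0.0, 0.5, 1.0 for i = 0, 1, 2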
-- ALGLIB routine -- 26.04.2012 Copyright by Bochkanov Sergey *************************************************************************/ public static void spline3dresampletrilinear(double[] a, int oldzcount, int oldycount, int oldxcount, int newzcount, int newycount, int newxcount, ref double[] b, alglib.xparams _params) { double xd = 0; double yd = 0; double zd = 0; double c0 = 0; double c1 = 0; double c2 = 0; double c3 = 0; int ix = 0; int iy = 0; int iz = 0; int i = 0; int j = 0; int k = 0; b = new double[0]; alglib.ap.assert((oldycount>1 && oldzcount>1) && oldxcount>1, "Spline3DResampleTrilinear: length/width/height less than 1"); alglib.ap.assert((newycount>1 && newzcount>1) && newxcount>1, "Spline3DResampleTrilinear: length/width/height less than 1"); alglib.ap.assert(alglib.ap.len(a)>=oldycount*oldzcount*oldxcount, "Spline3DResampleTrilinear: length/width/height less than 1"); b = new double[newxcount*newycount*newzcount]; for(i=0; i<=newxcount-1; i++) { for(j=0; j<=newycount-1; j++) { for(k=0; k<=newzcount-1; k++) { ix = i*(oldxcount-1)/(newxcount-1); if( ix==oldxcount-1 ) { ix = oldxcount-2; } xd = (double)(i*(oldxcount-1))/(double)(newxcount-1)-ix; iy = j*(oldycount-1)/(newycount-1); if( iy==oldycount-1 ) { iy = oldycount-2; } yd = (double)(j*(oldycount-1))/(double)(newycount-1)-iy; iz = k*(oldzcount-1)/(newzcount-1); if( iz==oldzcount-1 ) { iz = oldzcount-2; } zd = (double)(k*(oldzcount-1))/(double)(newzcount-1)-iz; c0 = a[oldxcount*(oldycount*iz+iy)+ix]*(1-xd)+a[oldxcount*(oldycount*iz+iy)+(ix+1)]*xd; c1 = a[oldxcount*(oldycount*iz+(iy+1))+ix]*(1-xd)+a[oldxcount*(oldycount*iz+(iy+1))+(ix+1)]*xd; c2 = a[oldxcount*(oldycount*(iz+1)+iy)+ix]*(1-xd)+a[oldxcount*(oldycount*(iz+1)+iy)+(ix+1)]*xd; c3 = a[oldxcount*(oldycount*(iz+1)+(iy+1))+ix]*(1-xd)+a[oldxcount*(oldycount*(iz+1)+(iy+1))+(ix+1)]*xd; c0 = c0*(1-yd)+c1*yd; c1 = c2*(1-yd)+c3*yd; b[newxcount*(newycount*k+j)+i] = c0*(1-zd)+c1*zd; } } } } /************************************************************************* This subroutine builds trilinear vector-valued spline. INPUT PARAMETERS: X - spline abscissas, array[0..N-1] Y - spline ordinates, array[0..M-1] Z - spline applicates, array[0..L-1] F - function values, array[0..M*N*L*D-1]: * first D elements store D values at (X[0],Y[0],Z[0]) * next D elements store D values at (X[1],Y[0],Z[0]) * next D elements store D values at (X[2],Y[0],Z[0]) * ... * next D elements store D values at (X[0],Y[1],Z[0]) * next D elements store D values at (X[1],Y[1],Z[0]) * next D elements store D values at (X[2],Y[1],Z[0]) * ... * next D elements store D values at (X[0],Y[0],Z[1]) * next D elements store D values at (X[1],Y[0],Z[1]) * next D elements store D values at (X[2],Y[0],Z[1]) * ... * general form - D function values at (X[i],Y[j]) are stored at F[D*(N*(M*K+J)+I)...D*(N*(M*K+J)+I)+D-1]. 
M,N, L - grid size, M>=2, N>=2, L>=2 D - vector dimension, D>=1 OUTPUT PARAMETERS: C - spline interpolant -- ALGLIB PROJECT -- Copyright 26.04.2012 by Bochkanov Sergey *************************************************************************/ public static void spline3dbuildtrilinearv(double[] x, int n, double[] y, int m, double[] z, int l, double[] f, int d, spline3dinterpolant c, alglib.xparams _params) { double t = 0; int tblsize = 0; int i = 0; int j = 0; int k = 0; int i0 = 0; int j0 = 0; alglib.ap.assert(m>=2, "Spline3DBuildTrilinearV: M<2"); alglib.ap.assert(n>=2, "Spline3DBuildTrilinearV: N<2"); alglib.ap.assert(l>=2, "Spline3DBuildTrilinearV: L<2"); alglib.ap.assert(d>=1, "Spline3DBuildTrilinearV: D<1"); alglib.ap.assert((alglib.ap.len(x)>=n && alglib.ap.len(y)>=m) && alglib.ap.len(z)>=l, "Spline3DBuildTrilinearV: length of X, Y or Z is too short (Length(X/Y/Z)=tblsize, "Spline3DBuildTrilinearV: length of F is too short (Length(F)=(double)(x) ) { r = h; } else { l = h; } } ix = l; // // Binary search in the [ y[0], ..., y[n-2] ] (y[n-1] is not included) // l = 0; r = c.m-1; while( l!=r-1 ) { h = (l+r)/2; if( (double)(c.y[h])>=(double)(y) ) { r = h; } else { l = h; } } iy = l; // // Binary search in the [ z[0], ..., z[n-2] ] (z[n-1] is not included) // l = 0; r = c.l-1; while( l!=r-1 ) { h = (l+r)/2; if( (double)(c.z[h])>=(double)(z) ) { r = h; } else { l = h; } } iz = l; xd = (x-c.x[ix])/(c.x[ix+1]-c.x[ix]); yd = (y-c.y[iy])/(c.y[iy+1]-c.y[iy]); zd = (z-c.z[iz])/(c.z[iz+1]-c.z[iz]); for(i=0; i<=c.d-1; i++) { // // Trilinear interpolation // if( c.stype==-1 ) { c0 = c.f[c.d*(c.n*(c.m*iz+iy)+ix)+i]*(1-xd)+c.f[c.d*(c.n*(c.m*iz+iy)+(ix+1))+i]*xd; c1 = c.f[c.d*(c.n*(c.m*iz+(iy+1))+ix)+i]*(1-xd)+c.f[c.d*(c.n*(c.m*iz+(iy+1))+(ix+1))+i]*xd; c2 = c.f[c.d*(c.n*(c.m*(iz+1)+iy)+ix)+i]*(1-xd)+c.f[c.d*(c.n*(c.m*(iz+1)+iy)+(ix+1))+i]*xd; c3 = c.f[c.d*(c.n*(c.m*(iz+1)+(iy+1))+ix)+i]*(1-xd)+c.f[c.d*(c.n*(c.m*(iz+1)+(iy+1))+(ix+1))+i]*xd; c0 = c0*(1-yd)+c1*yd; c1 = c2*(1-yd)+c3*yd; f[i] = c0*(1-zd)+c1*zd; } } } /************************************************************************* This subroutine calculates trilinear or tricubic vector-valued spline at the given point (X,Y,Z). INPUT PARAMETERS: C - spline interpolant. X, Y, Z - point OUTPUT PARAMETERS: F - array[D] which stores function values. F is out-parameter and it is reallocated after call to this function. In case you want to reuse previously allocated F, you may use Spline2DCalcVBuf(), which reallocates F only when it is too small. -- ALGLIB PROJECT -- Copyright 26.04.2012 by Bochkanov Sergey *************************************************************************/ public static void spline3dcalcv(spline3dinterpolant c, double x, double y, double z, ref double[] f, alglib.xparams _params) { f = new double[0]; alglib.ap.assert(c.stype==-1 || c.stype==-3, "Spline3DCalcV: incorrect C (incorrect parameter C.SType)"); alglib.ap.assert((math.isfinite(x) && math.isfinite(y)) && math.isfinite(z), "Spline3DCalcV: X=NaN/Infinite, Y=NaN/Infinite or Z=NaN/Infinite"); f = new double[c.d]; spline3dcalcvbuf(c, x, y, z, ref f, _params); } /************************************************************************* This subroutine unpacks tri-dimensional spline into the coefficients table INPUT PARAMETERS: C - spline interpolant. Result: N - grid size (X) M - grid size (Y) L - grid size (Z) D - number of components SType- spline type. Currently, only one spline type is supported: trilinear spline, as indicated by SType=1. 
Tbl - spline coefficients: [0..(N-1)*(M-1)*(L-1)*D-1, 0..13]. For T=0..D-1 (component index), I = 0...N-2 (x index), J=0..M-2 (y index), K=0..L-2 (z index): Q := T + I*D + J*D*(N-1) + K*D*(N-1)*(M-1), Q-th row stores decomposition for T-th component of the vector-valued function Tbl[Q,0] = X[i] Tbl[Q,1] = X[i+1] Tbl[Q,2] = Y[j] Tbl[Q,3] = Y[j+1] Tbl[Q,4] = Z[k] Tbl[Q,5] = Z[k+1] Tbl[Q,6] = C000 Tbl[Q,7] = C100 Tbl[Q,8] = C010 Tbl[Q,9] = C110 Tbl[Q,10]= C001 Tbl[Q,11]= C101 Tbl[Q,12]= C011 Tbl[Q,13]= C111 On each grid square spline is equals to: S(x) = SUM(c[i,j,k]*(x^i)*(y^j)*(z^k), i=0..1, j=0..1, k=0..1) t = x-x[j] u = y-y[i] v = z-z[k] NOTE: format of Tbl is given for SType=1. Future versions of ALGLIB can use different formats for different values of SType. -- ALGLIB PROJECT -- Copyright 26.04.2012 by Bochkanov Sergey *************************************************************************/ public static void spline3dunpackv(spline3dinterpolant c, ref int n, ref int m, ref int l, ref int d, ref int stype, ref double[,] tbl, alglib.xparams _params) { int p = 0; int ci = 0; int cj = 0; int ck = 0; double du = 0; double dv = 0; double dw = 0; int i = 0; int j = 0; int k = 0; int di = 0; int i0 = 0; n = 0; m = 0; l = 0; d = 0; stype = 0; tbl = new double[0,0]; alglib.ap.assert(c.stype==-1, "Spline3DUnpackV: incorrect C (incorrect parameter C.SType)"); n = c.n; m = c.m; l = c.l; d = c.d; stype = Math.Abs(c.stype); tbl = new double[(n-1)*(m-1)*(l-1)*d, 14]; // // Fill // for(i=0; i<=n-2; i++) { for(j=0; j<=m-2; j++) { for(k=0; k<=l-2; k++) { for(di=0; di<=d-1; di++) { p = d*((n-1)*((m-1)*k+j)+i)+di; tbl[p,0] = c.x[i]; tbl[p,1] = c.x[i+1]; tbl[p,2] = c.y[j]; tbl[p,3] = c.y[j+1]; tbl[p,4] = c.z[k]; tbl[p,5] = c.z[k+1]; du = 1/(tbl[p,1]-tbl[p,0]); dv = 1/(tbl[p,3]-tbl[p,2]); dw = 1/(tbl[p,5]-tbl[p,4]); // // Trilinear interpolation // if( c.stype==-1 ) { for(i0=6; i0<=13; i0++) { tbl[p,i0] = 0; } tbl[p,6+2*(2*0+0)+0] = c.f[d*(n*(m*k+j)+i)+di]; tbl[p,6+2*(2*0+0)+1] = c.f[d*(n*(m*k+j)+(i+1))+di]-c.f[d*(n*(m*k+j)+i)+di]; tbl[p,6+2*(2*0+1)+0] = c.f[d*(n*(m*k+(j+1))+i)+di]-c.f[d*(n*(m*k+j)+i)+di]; tbl[p,6+2*(2*0+1)+1] = c.f[d*(n*(m*k+(j+1))+(i+1))+di]-c.f[d*(n*(m*k+(j+1))+i)+di]-c.f[d*(n*(m*k+j)+(i+1))+di]+c.f[d*(n*(m*k+j)+i)+di]; tbl[p,6+2*(2*1+0)+0] = c.f[d*(n*(m*(k+1)+j)+i)+di]-c.f[d*(n*(m*k+j)+i)+di]; tbl[p,6+2*(2*1+0)+1] = c.f[d*(n*(m*(k+1)+j)+(i+1))+di]-c.f[d*(n*(m*(k+1)+j)+i)+di]-c.f[d*(n*(m*k+j)+(i+1))+di]+c.f[d*(n*(m*k+j)+i)+di]; tbl[p,6+2*(2*1+1)+0] = c.f[d*(n*(m*(k+1)+(j+1))+i)+di]-c.f[d*(n*(m*(k+1)+j)+i)+di]-c.f[d*(n*(m*k+(j+1))+i)+di]+c.f[d*(n*(m*k+j)+i)+di]; tbl[p,6+2*(2*1+1)+1] = c.f[d*(n*(m*(k+1)+(j+1))+(i+1))+di]-c.f[d*(n*(m*(k+1)+(j+1))+i)+di]-c.f[d*(n*(m*(k+1)+j)+(i+1))+di]+c.f[d*(n*(m*(k+1)+j)+i)+di]-c.f[d*(n*(m*k+(j+1))+(i+1))+di]+c.f[d*(n*(m*k+(j+1))+i)+di]+c.f[d*(n*(m*k+j)+(i+1))+di]-c.f[d*(n*(m*k+j)+i)+di]; } // // Rescale Cij // for(ci=0; ci<=1; ci++) { for(cj=0; cj<=1; cj++) { for(ck=0; ck<=1; ck++) { tbl[p,6+2*(2*ck+cj)+ci] = tbl[p,6+2*(2*ck+cj)+ci]*Math.Pow(du, ci)*Math.Pow(dv, cj)*Math.Pow(dw, ck); } } } } } } } } /************************************************************************* This subroutine calculates the value of the trilinear(or tricubic;possible will be later) spline at the given point X(and its derivatives; possible will be later). INPUT PARAMETERS: C - spline interpolant. 
X, Y, Z - point OUTPUT PARAMETERS: F - S(x,y,z) FX - dS(x,y,z)/dX FY - dS(x,y,z)/dY FXY - d2S(x,y,z)/dXdY -- ALGLIB PROJECT -- Copyright 26.04.2012 by Bochkanov Sergey *************************************************************************/ private static void spline3ddiff(spline3dinterpolant c, double x, double y, double z, ref double f, ref double fx, ref double fy, ref double fxy, alglib.xparams _params) { double xd = 0; double yd = 0; double zd = 0; double c0 = 0; double c1 = 0; double c2 = 0; double c3 = 0; int ix = 0; int iy = 0; int iz = 0; int l = 0; int r = 0; int h = 0; f = 0; fx = 0; fy = 0; fxy = 0; alglib.ap.assert(c.stype==-1 || c.stype==-3, "Spline3DDiff: incorrect C (incorrect parameter C.SType)"); alglib.ap.assert(math.isfinite(x) && math.isfinite(y), "Spline3DDiff: X or Y contains NaN or Infinite value"); // // Prepare F, dF/dX, dF/dY, d2F/dXdY // f = 0; fx = 0; fy = 0; fxy = 0; if( c.d!=1 ) { return; } // // Binary search in the [ x[0], ..., x[n-2] ] (x[n-1] is not included) // l = 0; r = c.n-1; while( l!=r-1 ) { h = (l+r)/2; if( (double)(c.x[h])>=(double)(x) ) { r = h; } else { l = h; } } ix = l; // // Binary search in the [ y[0], ..., y[n-2] ] (y[n-1] is not included) // l = 0; r = c.m-1; while( l!=r-1 ) { h = (l+r)/2; if( (double)(c.y[h])>=(double)(y) ) { r = h; } else { l = h; } } iy = l; // // Binary search in the [ z[0], ..., z[n-2] ] (z[n-1] is not included) // l = 0; r = c.l-1; while( l!=r-1 ) { h = (l+r)/2; if( (double)(c.z[h])>=(double)(z) ) { r = h; } else { l = h; } } iz = l; xd = (x-c.x[ix])/(c.x[ix+1]-c.x[ix]); yd = (y-c.y[iy])/(c.y[iy+1]-c.y[iy]); zd = (z-c.z[iz])/(c.z[iz+1]-c.z[iz]); // // Trilinear interpolation // if( c.stype==-1 ) { c0 = c.f[c.n*(c.m*iz+iy)+ix]*(1-xd)+c.f[c.n*(c.m*iz+iy)+(ix+1)]*xd; c1 = c.f[c.n*(c.m*iz+(iy+1))+ix]*(1-xd)+c.f[c.n*(c.m*iz+(iy+1))+(ix+1)]*xd; c2 = c.f[c.n*(c.m*(iz+1)+iy)+ix]*(1-xd)+c.f[c.n*(c.m*(iz+1)+iy)+(ix+1)]*xd; c3 = c.f[c.n*(c.m*(iz+1)+(iy+1))+ix]*(1-xd)+c.f[c.n*(c.m*(iz+1)+(iy+1))+(ix+1)]*xd; c0 = c0*(1-yd)+c1*yd; c1 = c2*(1-yd)+c3*yd; f = c0*(1-zd)+c1*zd; } } } public class polint { /************************************************************************* Conversion from barycentric representation to Chebyshev basis. This function has O(N^2) complexity. INPUT PARAMETERS: P - polynomial in barycentric form A,B - base interval for Chebyshev polynomials (see below) A<>B OUTPUT PARAMETERS T - coefficients of Chebyshev representation; P(x) = sum { T[i]*Ti(2*(x-A)/(B-A)-1), i=0..N-1 }, where Ti - I-th Chebyshev polynomial. NOTES: barycentric interpolant passed as P may be either polynomial obtained from polynomial interpolation/ fitting or rational function which is NOT polynomial. We can't distinguish between these two cases, and this algorithm just tries to work assuming that P IS a polynomial. If not, algorithm will return results, but they won't have any meaning. 
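EXAMPLE (informal sketch; it uses PolynomialBuild(), which is declared
later in this unit, assumes that the interpolant object can be default-
constructed in the same way as other ALGLIB objects, and passes null for
the trailing XParams argument):

    // convert P(x)=x^2, given in barycentric form, to the Chebyshev
    // basis on [-1,+1]; since x^2 = 0.5*T0(x) + 0.5*T2(x), the expected
    // coefficients are approximately {0.5, 0.0, 0.5}
    ratint.barycentricinterpolant p = new ratint.barycentricinterpolant();
    polynomialbuild(new double[]{-1,0,1}, new double[]{1,0,1}, 3, p, null);
    double[] t = new double[0];
    polynomialbar2cheb(p, -1.0, +1.0, ref t, null);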
-- ALGLIB -- Copyright 30.09.2010 by Bochkanov Sergey *************************************************************************/ public static void polynomialbar2cheb(ratint.barycentricinterpolant p, double a, double b, ref double[] t, alglib.xparams _params) { int i = 0; int k = 0; double[] vp = new double[0]; double[] vx = new double[0]; double[] tk = new double[0]; double[] tk1 = new double[0]; double v = 0; int i_ = 0; t = new double[0]; alglib.ap.assert(math.isfinite(a), "PolynomialBar2Cheb: A is not finite!"); alglib.ap.assert(math.isfinite(b), "PolynomialBar2Cheb: B is not finite!"); alglib.ap.assert((double)(a)!=(double)(b), "PolynomialBar2Cheb: A=B!"); alglib.ap.assert(p.n>0, "PolynomialBar2Cheb: P is not correctly initialized barycentric interpolant!"); // // Calculate function values on a Chebyshev grid // vp = new double[p.n]; vx = new double[p.n]; for(i=0; i<=p.n-1; i++) { vx[i] = Math.Cos(Math.PI*(i+0.5)/p.n); vp[i] = ratint.barycentriccalc(p, 0.5*(vx[i]+1)*(b-a)+a, _params); } // // T[0] // t = new double[p.n]; v = 0; for(i=0; i<=p.n-1; i++) { v = v+vp[i]; } t[0] = v/p.n; // // other T's. // // NOTES: // 1. TK stores T{k} on VX, TK1 stores T{k-1} on VX // 2. we can do same calculations with fast DCT, but it // * adds dependencies // * still leaves us with O(N^2) algorithm because // preparation of function values is O(N^2) process // if( p.n>1 ) { tk = new double[p.n]; tk1 = new double[p.n]; for(i=0; i<=p.n-1; i++) { tk[i] = vx[i]; tk1[i] = 1; } for(k=1; k<=p.n-1; k++) { // // calculate discrete product of function vector and TK // v = 0.0; for(i_=0; i_<=p.n-1;i_++) { v += tk[i_]*vp[i_]; } t[k] = v/(0.5*p.n); // // Update TK and TK1 // for(i=0; i<=p.n-1; i++) { v = 2*vx[i]*tk[i]-tk1[i]; tk1[i] = tk[i]; tk[i] = v; } } } } /************************************************************************* Conversion from Chebyshev basis to barycentric representation. This function has O(N^2) complexity. INPUT PARAMETERS: T - coefficients of Chebyshev representation; P(x) = sum { T[i]*Ti(2*(x-A)/(B-A)-1), i=0..N }, where Ti - I-th Chebyshev polynomial. N - number of coefficients: * if given, only leading N elements of T are used * if not given, automatically determined from size of T A,B - base interval for Chebyshev polynomials (see above) A=1, "PolynomialBar2Cheb: N<1"); alglib.ap.assert(alglib.ap.len(t)>=n, "PolynomialBar2Cheb: Length(T)0. OUTPUT PARAMETERS A - coefficients, P(x) = sum { A[i]*((X-C)/S)^i, i=0..N-1 } N - number of coefficients (polynomial degree plus 1) NOTES: 1. this function accepts offset and scale, which can be set to improve numerical properties of polynomial. For example, if P was obtained as result of interpolation on [-1,+1], you can set C=0 and S=1 and represent P as sum of 1, x, x^2, x^3 and so on. In most cases you it is exactly what you need. However, if your interpolation model was built on [999,1001], you will see significant growth of numerical errors when using {1, x, x^2, x^3} as basis. Representing P as sum of 1, (x-1000), (x-1000)^2, (x-1000)^3 will be better option. Such representation can be obtained by using 1000.0 as offset C and 1.0 as scale S. 2. power basis is ill-conditioned and tricks described above can't solve this problem completely. This function will return coefficients in any case, but for N>8 they will become unreliable. However, N's less than 5 are pretty safe. 3. barycentric interpolant passed as P may be either polynomial obtained from polynomial interpolation/ fitting or rational function which is NOT polynomial. 
We can't distinguish between these two cases, and this algorithm just tries to work assuming that P IS a polynomial. If not, algorithm will return results, but they won't have any meaning. -- ALGLIB -- Copyright 30.09.2010 by Bochkanov Sergey *************************************************************************/ public static void polynomialbar2pow(ratint.barycentricinterpolant p, double c, double s, ref double[] a, alglib.xparams _params) { int i = 0; int k = 0; double e = 0; double d = 0; double[] vp = new double[0]; double[] vx = new double[0]; double[] tk = new double[0]; double[] tk1 = new double[0]; double[] t = new double[0]; double v = 0; double c0 = 0; double s0 = 0; double va = 0; double vb = 0; double[] vai = new double[0]; double[] vbi = new double[0]; double minx = 0; double maxx = 0; int i_ = 0; a = new double[0]; // // We have barycentric model built using set of points X[], and we // want to convert it to power basis centered about point C with // scale S: I-th basis function is ((X-C)/S)^i. // // We use following three-stage algorithm: // // 1. we build Chebyshev representation of polynomial using // intermediate center C0 and scale S0, which are derived from X[]: // C0 = 0.5*(min(X)+max(X)), S0 = 0.5*(max(X)-min(X)). Chebyshev // representation is built by sampling points around center C0, // with typical distance between them proportional to S0. // 2. then we transform form Chebyshev basis to intermediate power // basis, using same center/scale C0/S0. // 3. after that, we apply linear transformation to intermediate // power basis which moves it to final center/scale C/S. // // The idea of such multi-stage algorithm is that it is much easier to // transform barycentric model to Chebyshev basis, and only later to // power basis, than transforming it directly to power basis. It is // also more numerically stable to sample points using intermediate C0/S0, // which are derived from user-supplied model, than using "final" C/S, // which may be unsuitable for sampling (say, if S=1, we may have stability // problems when working with models built from dataset with non-unit // scale of abscissas). // alglib.ap.assert(math.isfinite(c), "PolynomialBar2Pow: C is not finite!"); alglib.ap.assert(math.isfinite(s), "PolynomialBar2Pow: S is not finite!"); alglib.ap.assert((double)(s)!=(double)(0), "PolynomialBar2Pow: S=0!"); alglib.ap.assert(p.n>0, "PolynomialBar2Pow: P is not correctly initialized barycentric interpolant!"); // // Select intermediate center/scale // minx = p.x[0]; maxx = p.x[0]; for(i=1; i<=p.n-1; i++) { minx = Math.Min(minx, p.x[i]); maxx = Math.Max(maxx, p.x[i]); } if( (double)(minx)==(double)(maxx) ) { c0 = minx; s0 = 1.0; } else { c0 = 0.5*(maxx+minx); s0 = 0.5*(maxx-minx); } // // Calculate function values on a Chebyshev grid using intermediate C0/S0 // vp = new double[p.n+1]; vx = new double[p.n]; for(i=0; i<=p.n-1; i++) { vx[i] = Math.Cos(Math.PI*(i+0.5)/p.n); vp[i] = ratint.barycentriccalc(p, s0*vx[i]+c0, _params); } // // T[0] // t = new double[p.n]; v = 0; for(i=0; i<=p.n-1; i++) { v = v+vp[i]; } t[0] = v/p.n; // // other T's. // // NOTES: // 1. TK stores T{k} on VX, TK1 stores T{k-1} on VX // 2. 
we can do same calculations with fast DCT, but it // * adds dependencies // * still leaves us with O(N^2) algorithm because // preparation of function values is O(N^2) process // if( p.n>1 ) { tk = new double[p.n]; tk1 = new double[p.n]; for(i=0; i<=p.n-1; i++) { tk[i] = vx[i]; tk1[i] = 1; } for(k=1; k<=p.n-1; k++) { // // calculate discrete product of function vector and TK // v = 0.0; for(i_=0; i_<=p.n-1;i_++) { v += tk[i_]*vp[i_]; } t[k] = v/(0.5*p.n); // // Update TK and TK1 // for(i=0; i<=p.n-1; i++) { v = 2*vx[i]*tk[i]-tk1[i]; tk1[i] = tk[i]; tk[i] = v; } } } // // Convert from Chebyshev basis to power basis // a = new double[p.n]; for(i=0; i<=p.n-1; i++) { a[i] = 0; } d = 0; for(i=0; i<=p.n-1; i++) { for(k=i; k<=p.n-1; k++) { e = a[k]; a[k] = 0; if( i<=1 && k==i ) { a[k] = 1; } else { if( i!=0 ) { a[k] = 2*d; } if( k>i+1 ) { a[k] = a[k]-a[k-2]; } } d = e; } d = a[i]; e = 0; k = i; while( k<=p.n-1 ) { e = e+a[k]*t[k]; k = k+2; } a[i] = e; } // // Apply linear transformation which converts basis from intermediate // one Fi=((x-C0)/S0)^i to final one Fi=((x-C)/S)^i. // // We have y=(x-C0)/S0, z=(x-C)/S, and coefficients A[] for basis Fi(y). // Because we have y=A*z+B, for A=s/s0 and B=c/s0-c0/s0, we can perform // substitution and get coefficients A_new[] in basis Fi(z). // alglib.ap.assert(alglib.ap.len(vp)>=p.n+1, "PolynomialBar2Pow: internal error"); alglib.ap.assert(alglib.ap.len(t)>=p.n, "PolynomialBar2Pow: internal error"); for(i=0; i<=p.n-1; i++) { t[i] = 0.0; } va = s/s0; vb = c/s0-c0/s0; vai = new double[p.n]; vbi = new double[p.n]; vai[0] = 1; vbi[0] = 1; for(k=1; k<=p.n-1; k++) { vai[k] = vai[k-1]*va; vbi[k] = vbi[k-1]*vb; } for(k=0; k<=p.n-1; k++) { // // Generate set of binomial coefficients in VP[] // if( k>0 ) { vp[k] = 1; for(i=k-1; i>=1; i--) { vp[i] = vp[i]+vp[i-1]; } vp[0] = 1; } else { vp[0] = 1; } // // Update T[] with expansion of K-th basis function // for(i=0; i<=k; i++) { t[i] = t[i]+a[k]*vai[i]*vbi[k-i]*vp[i]; } } for(k=0; k<=p.n-1; k++) { a[k] = t[k]; } } /************************************************************************* Conversion from power basis to barycentric representation. This function has O(N^2) complexity. INPUT PARAMETERS: A - coefficients, P(x) = sum { A[i]*((X-C)/S)^i, i=0..N-1 } N - number of coefficients (polynomial degree plus 1) * if given, only leading N elements of A are used * if not given, automatically determined from size of A C - offset (see below); 0.0 is used as default value. S - scale (see below); 1.0 is used as default value. S<>0. OUTPUT PARAMETERS P - polynomial in barycentric form NOTES: 1. this function accepts offset and scale, which can be set to improve numerical properties of polynomial. For example, if you interpolate on [-1,+1], you can set C=0 and S=1 and convert from sum of 1, x, x^2, x^3 and so on. In most cases you it is exactly what you need. However, if your interpolation model was built on [999,1001], you will see significant growth of numerical errors when using {1, x, x^2, x^3} as input basis. Converting from sum of 1, (x-1000), (x-1000)^2, (x-1000)^3 will be better option (you have to specify 1000.0 as offset C and 1.0 as scale S). 2. power basis is ill-conditioned and tricks described above can't solve this problem completely. This function will return barycentric model in any case, but for N>8 accuracy well degrade. However, N's less than 5 are pretty safe. 
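EXAMPLE (informal sketch; it uses the entry points declared in this unit
and in the ratint unit, assumes default construction of the interpolant
object, and passes null for the trailing XParams argument):

    // convert P(x) = 0 + 0*x + 1*x^2 (power basis with C=0, S=1) to
    // barycentric form and evaluate it at x=0.5; expected value is 0.25
    ratint.barycentricinterpolant p = new ratint.barycentricinterpolant();
    polynomialpow2bar(new double[]{0,0,1}, 3, 0.0, 1.0, p, null);
    double v = ratint.barycentriccalc(p, 0.5, null);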
-- ALGLIB -- Copyright 30.09.2010 by Bochkanov Sergey *************************************************************************/ public static void polynomialpow2bar(double[] a, int n, double c, double s, ratint.barycentricinterpolant p, alglib.xparams _params) { int i = 0; int k = 0; double[] y = new double[0]; double vx = 0; double vy = 0; double px = 0; alglib.ap.assert(math.isfinite(c), "PolynomialPow2Bar: C is not finite!"); alglib.ap.assert(math.isfinite(s), "PolynomialPow2Bar: S is not finite!"); alglib.ap.assert((double)(s)!=(double)(0), "PolynomialPow2Bar: S is zero!"); alglib.ap.assert(n>=1, "PolynomialPow2Bar: N<1"); alglib.ap.assert(alglib.ap.len(a)>=n, "PolynomialPow2Bar: Length(A)=1 OUTPUT PARAMETERS P - barycentric model which represents Lagrange interpolant (see ratint unit info and BarycentricCalc() description for more information). -- ALGLIB -- Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ public static void polynomialbuild(double[] x, double[] y, int n, ratint.barycentricinterpolant p, alglib.xparams _params) { int j = 0; int k = 0; double[] w = new double[0]; double b = 0; double a = 0; double v = 0; double mx = 0; double[] sortrbuf = new double[0]; double[] sortrbuf2 = new double[0]; int i_ = 0; x = (double[])x.Clone(); y = (double[])y.Clone(); alglib.ap.assert(n>0, "PolynomialBuild: N<=0!"); alglib.ap.assert(alglib.ap.len(x)>=n, "PolynomialBuild: Length(X)=n, "PolynomialBuild: Length(Y)=1 for N=1 a constant model is constructed. OUTPUT PARAMETERS P - barycentric model which represents Lagrange interpolant (see ratint unit info and BarycentricCalc() description for more information). -- ALGLIB -- Copyright 03.12.2009 by Bochkanov Sergey *************************************************************************/ public static void polynomialbuildeqdist(double a, double b, double[] y, int n, ratint.barycentricinterpolant p, alglib.xparams _params) { int i = 0; double[] w = new double[0]; double[] x = new double[0]; double v = 0; alglib.ap.assert(n>0, "PolynomialBuildEqDist: N<=0!"); alglib.ap.assert(alglib.ap.len(y)>=n, "PolynomialBuildEqDist: Length(Y)=1 for N=1 a constant model is constructed. OUTPUT PARAMETERS P - barycentric model which represents Lagrange interpolant (see ratint unit info and BarycentricCalc() description for more information). -- ALGLIB -- Copyright 03.12.2009 by Bochkanov Sergey *************************************************************************/ public static void polynomialbuildcheb1(double a, double b, double[] y, int n, ratint.barycentricinterpolant p, alglib.xparams _params) { int i = 0; double[] w = new double[0]; double[] x = new double[0]; double v = 0; double t = 0; alglib.ap.assert(n>0, "PolynomialBuildCheb1: N<=0!"); alglib.ap.assert(alglib.ap.len(y)>=n, "PolynomialBuildCheb1: Length(Y)=1 for N=1 a constant model is constructed. OUTPUT PARAMETERS P - barycentric model which represents Lagrange interpolant (see ratint unit info and BarycentricCalc() description for more information). 
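EXAMPLE (informal sketch; it assumes default construction of the
interpolant object and passes null for the trailing XParams argument):

    // interpolate f(x)=x^2 on the 3-point Chebyshev (second kind) grid of
    // [-1,+1]; the grid nodes are {+1, 0, -1}, hence Y = {1, 0, 1}
    ratint.barycentricinterpolant p = new ratint.barycentricinterpolant();
    polynomialbuildcheb2(-1.0, +1.0, new double[]{1,0,1}, 3, p, null);
    double v = ratint.barycentriccalc(p, 0.5, null);   // expected 0.25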
-- ALGLIB -- Copyright 03.12.2009 by Bochkanov Sergey *************************************************************************/ public static void polynomialbuildcheb2(double a, double b, double[] y, int n, ratint.barycentricinterpolant p, alglib.xparams _params) { int i = 0; double[] w = new double[0]; double[] x = new double[0]; double v = 0; alglib.ap.assert(n>0, "PolynomialBuildCheb2: N<=0!"); alglib.ap.assert(alglib.ap.len(y)>=n, "PolynomialBuildCheb2: Length(Y)=1 for N=1 a constant model is constructed. T - position where P(x) is calculated RESULT value of the Lagrange interpolant at T IMPORTANT this function provides fast interface which is not overflow-safe nor it is very precise. the best option is to use PolynomialBuildEqDist()/BarycentricCalc() subroutines unless you are pretty sure that your data will not result in overflow. -- ALGLIB -- Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ public static double polynomialcalceqdist(double a, double b, double[] f, int n, double t, alglib.xparams _params) { double result = 0; double s1 = 0; double s2 = 0; double v = 0; double threshold = 0; double s = 0; double h = 0; int i = 0; int j = 0; double w = 0; double x = 0; alglib.ap.assert(n>0, "PolynomialCalcEqDist: N<=0!"); alglib.ap.assert(alglib.ap.len(f)>=n, "PolynomialCalcEqDist: Length(F)(double)(threshold) ) { // // use fast formula // j = -1; s = 1.0; } // // Calculate using safe or fast barycentric formula // s1 = 0; s2 = 0; w = 1.0; h = (b-a)/(n-1); for(i=0; i<=n-1; i++) { if( i!=j ) { v = s*w/(t-(a+i*h)); s1 = s1+v*f[i]; s2 = s2+v; } else { v = w; s1 = s1+v*f[i]; s2 = s2+v; } w = -(w*(n-1-i)); w = w/(i+1); } result = s1/s2; return result; } /************************************************************************* Fast polynomial interpolation function on Chebyshev points (first kind) with O(N) complexity. INPUT PARAMETERS: A - left boundary of [A,B] B - right boundary of [A,B] F - function values, array[0..N-1] N - number of points on Chebyshev grid (first kind), X[i] = 0.5*(B+A) + 0.5*(B-A)*Cos(PI*(2*i+1)/(2*n)) for N=1 a constant model is constructed. T - position where P(x) is calculated RESULT value of the Lagrange interpolant at T IMPORTANT this function provides fast interface which is not overflow-safe nor it is very precise. the best option is to use PolIntBuildCheb1()/BarycentricCalc() subroutines unless you are pretty sure that your data will not result in overflow. 
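EXAMPLE (informal sketch; it calls the function exactly as it is declared
below and passes null for the trailing XParams argument):

    // evaluate the interpolant of f(x)=x^2 given on the 3-point Chebyshev
    // (first kind) grid of [-1,+1]; the nodes are {+0.866.., 0, -0.866..},
    // hence F = {0.75, 0.0, 0.75}; the expected result at t=0.5 is 0.25
    double v = polynomialcalccheb1(-1.0, +1.0,
        new double[]{0.75, 0.0, 0.75}, 3, 0.5, null);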
-- ALGLIB -- Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ public static double polynomialcalccheb1(double a, double b, double[] f, int n, double t, alglib.xparams _params) { double result = 0; double s1 = 0; double s2 = 0; double v = 0; double threshold = 0; double s = 0; int i = 0; int j = 0; double a0 = 0; double delta = 0; double alpha = 0; double beta = 0; double ca = 0; double sa = 0; double tempc = 0; double temps = 0; double x = 0; double w = 0; double p1 = 0; alglib.ap.assert(n>0, "PolynomialCalcCheb1: N<=0!"); alglib.ap.assert(alglib.ap.len(f)>=n, "PolynomialCalcCheb1: Length(F)(double)(threshold) ) { // // use fast formula // j = -1; s = 1.0; } // // Calculate using safe or fast barycentric formula // s1 = 0; s2 = 0; ca = Math.Cos(a0); sa = Math.Sin(a0); p1 = 1.0; for(i=0; i<=n-1; i++) { // // Calculate X[i], W[i] // x = ca; w = p1*sa; // // Proceed // if( i!=j ) { v = s*w/(t-x); s1 = s1+v*f[i]; s2 = s2+v; } else { v = w; s1 = s1+v*f[i]; s2 = s2+v; } // // Next CA, SA, P1 // temps = sa-(alpha*sa-beta*ca); tempc = ca-(alpha*ca+beta*sa); sa = temps; ca = tempc; p1 = -p1; } result = s1/s2; return result; } /************************************************************************* Fast polynomial interpolation function on Chebyshev points (second kind) with O(N) complexity. INPUT PARAMETERS: A - left boundary of [A,B] B - right boundary of [A,B] F - function values, array[0..N-1] N - number of points on Chebyshev grid (second kind), X[i] = 0.5*(B+A) + 0.5*(B-A)*Cos(PI*i/(n-1)) for N=1 a constant model is constructed. T - position where P(x) is calculated RESULT value of the Lagrange interpolant at T IMPORTANT this function provides fast interface which is not overflow-safe nor it is very precise. the best option is to use PolIntBuildCheb2()/BarycentricCalc() subroutines unless you are pretty sure that your data will not result in overflow. 
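EXAMPLE (informal sketch; it calls the function exactly as it is declared
below and passes null for the trailing XParams argument):

    // evaluate the interpolant of f(x)=x^2 given on the 3-point Chebyshev
    // (second kind) grid of [-1,+1]; the nodes are {+1, 0, -1}, hence
    // F = {1, 0, 1}; the expected result at t=0.5 is 0.25
    double v = polynomialcalccheb2(-1.0, +1.0,
        new double[]{1.0, 0.0, 1.0}, 3, 0.5, null);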
-- ALGLIB -- Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ public static double polynomialcalccheb2(double a, double b, double[] f, int n, double t, alglib.xparams _params) { double result = 0; double s1 = 0; double s2 = 0; double v = 0; double threshold = 0; double s = 0; int i = 0; int j = 0; double a0 = 0; double delta = 0; double alpha = 0; double beta = 0; double ca = 0; double sa = 0; double tempc = 0; double temps = 0; double x = 0; double w = 0; double p1 = 0; alglib.ap.assert(n>0, "PolynomialCalcCheb2: N<=0!"); alglib.ap.assert(alglib.ap.len(f)>=n, "PolynomialCalcCheb2: Length(F)(double)(threshold) ) { // // use fast formula // j = -1; s = 1.0; } // // Calculate using safe or fast barycentric formula // s1 = 0; s2 = 0; ca = Math.Cos(a0); sa = Math.Sin(a0); p1 = 1.0; for(i=0; i<=n-1; i++) { // // Calculate X[i], W[i] // x = ca; if( i==0 || i==n-1 ) { w = 0.5*p1; } else { w = 1.0*p1; } // // Proceed // if( i!=j ) { v = s*w/(t-x); s1 = s1+v*f[i]; s2 = s2+v; } else { v = w; s1 = s1+v*f[i]; s2 = s2+v; } // // Next CA, SA, P1 // temps = sa-(alpha*sa-beta*ca); tempc = ca-(alpha*ca+beta*sa); sa = temps; ca = tempc; p1 = -p1; } result = s1/s2; return result; } } public class lsfit { /************************************************************************* Polynomial fitting report: TaskRCond reciprocal of task's condition number RMSError RMS error AvgError average error AvgRelError average relative error (for non-zero Y[I]) MaxError maximum error *************************************************************************/ public class polynomialfitreport : apobject { public double taskrcond; public double rmserror; public double avgerror; public double avgrelerror; public double maxerror; public polynomialfitreport() { init(); } public override void init() { } public override alglib.apobject make_copy() { polynomialfitreport _result = new polynomialfitreport(); _result.taskrcond = taskrcond; _result.rmserror = rmserror; _result.avgerror = avgerror; _result.avgrelerror = avgrelerror; _result.maxerror = maxerror; return _result; } }; /************************************************************************* Barycentric fitting report: RMSError RMS error AvgError average error AvgRelError average relative error (for non-zero Y[I]) MaxError maximum error TaskRCond reciprocal of task's condition number *************************************************************************/ public class barycentricfitreport : apobject { public double taskrcond; public int dbest; public double rmserror; public double avgerror; public double avgrelerror; public double maxerror; public barycentricfitreport() { init(); } public override void init() { } public override alglib.apobject make_copy() { barycentricfitreport _result = new barycentricfitreport(); _result.taskrcond = taskrcond; _result.dbest = dbest; _result.rmserror = rmserror; _result.avgerror = avgerror; _result.avgrelerror = avgrelerror; _result.maxerror = maxerror; return _result; } }; /************************************************************************* Least squares fitting report. This structure contains informational fields which are set by fitting functions provided by this unit. Different functions initialize different sets of fields, so you should read documentation on specific function you used in order to know which fields are initialized. 
TaskRCond reciprocal of task's condition number IterationsCount number of internal iterations VarIdx if user-supplied gradient contains errors which were detected by nonlinear fitter, this field is set to index of the first component of gradient which is suspected to be spoiled by bugs. RMSError RMS error AvgError average error AvgRelError average relative error (for non-zero Y[I]) MaxError maximum error WRMSError weighted RMS error CovPar covariance matrix for parameters, filled by some solvers ErrPar vector of errors in parameters, filled by some solvers ErrCurve vector of fit errors - variability of the best-fit curve, filled by some solvers. Noise vector of per-point noise estimates, filled by some solvers. R2 coefficient of determination (non-weighted, non-adjusted), filled by some solvers. *************************************************************************/ public class lsfitreport : apobject { public double taskrcond; public int iterationscount; public int varidx; public double rmserror; public double avgerror; public double avgrelerror; public double maxerror; public double wrmserror; public double[,] covpar; public double[] errpar; public double[] errcurve; public double[] noise; public double r2; public lsfitreport() { init(); } public override void init() { covpar = new double[0,0]; errpar = new double[0]; errcurve = new double[0]; noise = new double[0]; } public override alglib.apobject make_copy() { lsfitreport _result = new lsfitreport(); _result.taskrcond = taskrcond; _result.iterationscount = iterationscount; _result.varidx = varidx; _result.rmserror = rmserror; _result.avgerror = avgerror; _result.avgrelerror = avgrelerror; _result.maxerror = maxerror; _result.wrmserror = wrmserror; _result.covpar = (double[,])covpar.Clone(); _result.errpar = (double[])errpar.Clone(); _result.errcurve = (double[])errcurve.Clone(); _result.noise = (double[])noise.Clone(); _result.r2 = r2; return _result; } }; /************************************************************************* Nonlinear fitter. You should use ALGLIB functions to work with fitter. Never try to access its fields directly! 
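A typical fitting session is sketched below. The sketch is informal: the
wrapper names lsfitcreatef(), lsfitfit() and lsfitresults() belong to the
public ALGLIB interface and are assumed here rather than declared in this
file, and the data values are illustrative only.

    // fit y = exp(-c0*x^2) using numerical differentiation (diffstep=1E-4);
    // the wrapper calls below are assumptions about the public interface
    double[,] x = new double[,]{{-1},{-0.5},{0},{0.5},{1}};
    double[] y = new double[]{0.37, 0.78, 1.00, 0.78, 0.37};
    double[] c = new double[]{0.3};
    int info;
    alglib.lsfitstate state;
    alglib.lsfitreport rep;
    alglib.lsfitcreatef(x, y, c, 0.0001, out state);
    alglib.lsfitfit(state,
        (double[] cc, double[] xx, ref double fi, object obj) =>
            { fi = Math.Exp(-(cc[0]*xx[0]*xx[0])); },
        null, null);
    alglib.lsfitresults(state, out info, out c, out rep);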
*************************************************************************/ public class lsfitstate : apobject { public int optalgo; public int m; public int k; public double epsx; public int maxits; public double stpmax; public bool xrep; public double[] c0; public double[] c1; public double[] s; public double[] bndl; public double[] bndu; public double[,] taskx; public double[] tasky; public int npoints; public double[] taskw; public int nweights; public int wkind; public int wits; public double diffstep; public double teststep; public double[,] cleic; public int nec; public int nic; public bool xupdated; public bool needf; public bool needfg; public bool needfgh; public int pointindex; public double[] x; public double[] c; public double f; public double[] g; public double[,] h; public double[] wcur; public int[] tmpct; public double[] tmp; public double[] tmpf; public double[,] tmpjac; public double[,] tmpjacw; public double tmpnoise; public matinv.matinvreport invrep; public int repiterationscount; public int repterminationtype; public int repvaridx; public double reprmserror; public double repavgerror; public double repavgrelerror; public double repmaxerror; public double repwrmserror; public lsfitreport rep; public minlm.minlmstate optstate; public minlm.minlmreport optrep; public int prevnpt; public int prevalgo; public rcommstate rstate; public lsfitstate() { init(); } public override void init() { c0 = new double[0]; c1 = new double[0]; s = new double[0]; bndl = new double[0]; bndu = new double[0]; taskx = new double[0,0]; tasky = new double[0]; taskw = new double[0]; cleic = new double[0,0]; x = new double[0]; c = new double[0]; g = new double[0]; h = new double[0,0]; wcur = new double[0]; tmpct = new int[0]; tmp = new double[0]; tmpf = new double[0]; tmpjac = new double[0,0]; tmpjacw = new double[0,0]; invrep = new matinv.matinvreport(); rep = new lsfitreport(); optstate = new minlm.minlmstate(); optrep = new minlm.minlmreport(); rstate = new rcommstate(); } public override alglib.apobject make_copy() { lsfitstate _result = new lsfitstate(); _result.optalgo = optalgo; _result.m = m; _result.k = k; _result.epsx = epsx; _result.maxits = maxits; _result.stpmax = stpmax; _result.xrep = xrep; _result.c0 = (double[])c0.Clone(); _result.c1 = (double[])c1.Clone(); _result.s = (double[])s.Clone(); _result.bndl = (double[])bndl.Clone(); _result.bndu = (double[])bndu.Clone(); _result.taskx = (double[,])taskx.Clone(); _result.tasky = (double[])tasky.Clone(); _result.npoints = npoints; _result.taskw = (double[])taskw.Clone(); _result.nweights = nweights; _result.wkind = wkind; _result.wits = wits; _result.diffstep = diffstep; _result.teststep = teststep; _result.cleic = (double[,])cleic.Clone(); _result.nec = nec; _result.nic = nic; _result.xupdated = xupdated; _result.needf = needf; _result.needfg = needfg; _result.needfgh = needfgh; _result.pointindex = pointindex; _result.x = (double[])x.Clone(); _result.c = (double[])c.Clone(); _result.f = f; _result.g = (double[])g.Clone(); _result.h = (double[,])h.Clone(); _result.wcur = (double[])wcur.Clone(); _result.tmpct = (int[])tmpct.Clone(); _result.tmp = (double[])tmp.Clone(); _result.tmpf = (double[])tmpf.Clone(); _result.tmpjac = (double[,])tmpjac.Clone(); _result.tmpjacw = (double[,])tmpjacw.Clone(); _result.tmpnoise = tmpnoise; _result.invrep = (matinv.matinvreport)invrep.make_copy(); _result.repiterationscount = repiterationscount; _result.repterminationtype = repterminationtype; _result.repvaridx = repvaridx; _result.reprmserror = 
reprmserror; _result.repavgerror = repavgerror; _result.repavgrelerror = repavgrelerror; _result.repmaxerror = repmaxerror; _result.repwrmserror = repwrmserror; _result.rep = (lsfitreport)rep.make_copy(); _result.optstate = (minlm.minlmstate)optstate.make_copy(); _result.optrep = (minlm.minlmreport)optrep.make_copy(); _result.prevnpt = prevnpt; _result.prevalgo = prevalgo; _result.rstate = (rcommstate)rstate.make_copy(); return _result; } }; /************************************************************************* This subroutine fits piecewise linear curve to points with Ramer-Douglas- Peucker algorithm, which stops after generating specified number of linear sections. IMPORTANT: * it does NOT perform least-squares fitting; it builds curve, but this curve does not minimize some least squares metric. See description of RDP algorithm (say, in Wikipedia) for more details on WHAT is performed. * this function does NOT work with parametric curves (i.e. curves which can be represented as {X(t),Y(t)}. It works with curves which can be represented as Y(X). Thus, it is impossible to model figures like circles with this functions. If you want to work with parametric curves, you should use ParametricRDPFixed() function provided by "Parametric" subpackage of "Interpolation" package. INPUT PARAMETERS: X - array of X-coordinates: * at least N elements * can be unordered (points are automatically sorted) * this function may accept non-distinct X (see below for more information on handling of such inputs) Y - array of Y-coordinates: * at least N elements N - number of elements in X/Y M - desired number of sections: * at most M sections are generated by this function * less than M sections can be generated if we have N=0, "LSTFitPiecewiseLinearRDPFixed: N<0"); alglib.ap.assert(m>=1, "LSTFitPiecewiseLinearRDPFixed: M<1"); alglib.ap.assert(alglib.ap.len(x)>=n, "LSTFitPiecewiseLinearRDPFixed: Length(X)=n, "LSTFitPiecewiseLinearRDPFixed: Length(Y)(double)(x[k]) ) { k = (int)Math.Round(sections[i,1]); } } points[nsections] = k; tsort.tagsortfast(ref points, ref buf0, nsections+1, _params); // // Output sections: // * first NSection elements of X2/Y2 are filled by x/y at left boundaries of sections // * last element of X2/Y2 is filled by right boundary of rightmost section // * X2/Y2 is sorted by ascending of X2 // x2 = new double[nsections+1]; y2 = new double[nsections+1]; for(i=0; i<=nsections; i++) { x2[i] = x[(int)Math.Round(points[i])]; y2[i] = y[(int)Math.Round(points[i])]; } } /************************************************************************* This subroutine fits piecewise linear curve to points with Ramer-Douglas- Peucker algorithm, which stops after achieving desired precision. IMPORTANT: * it performs non-least-squares fitting; it builds curve, but this curve does not minimize some least squares metric. See description of RDP algorithm (say, in Wikipedia) for more details on WHAT is performed. * this function does NOT work with parametric curves (i.e. curves which can be represented as {X(t),Y(t)}. It works with curves which can be represented as Y(X). Thus, it is impossible to model figures like circles with this functions. If you want to work with parametric curves, you should use ParametricRDPFixed() function provided by "Parametric" subpackage of "Interpolation" package. 
INPUT PARAMETERS: X - array of X-coordinates: * at least N elements * can be unordered (points are automatically sorted) * this function may accept non-distinct X (see below for more information on handling of such inputs) Y - array of Y-coordinates: * at least N elements N - number of elements in X/Y Eps - positive number, desired precision. OUTPUT PARAMETERS: X2 - X-values of corner points for piecewise approximation, has length NSections+1 or zero (for NSections=0). Y2 - Y-values of corner points, has length NSections+1 or zero (for NSections=0). NSections- number of sections found by algorithm, NSections can be zero for degenerate datasets (N<=1 or all X[] are non-distinct). NOTE: X2/Y2 are ordered arrays, i.e. (X2[0],Y2[0]) is a first point of curve, (X2[NSection-1],Y2[NSection-1]) is the last point. -- ALGLIB -- Copyright 02.10.2014 by Bochkanov Sergey *************************************************************************/ public static void lstfitpiecewiselinearrdp(double[] x, double[] y, int n, double eps, ref double[] x2, ref double[] y2, ref int nsections, alglib.xparams _params) { int i = 0; int j = 0; int k = 0; double[] buf0 = new double[0]; double[] buf1 = new double[0]; double[] xtmp = new double[0]; double[] ytmp = new double[0]; double v = 0; int npts = 0; x = (double[])x.Clone(); y = (double[])y.Clone(); x2 = new double[0]; y2 = new double[0]; nsections = 0; alglib.ap.assert(n>=0, "LSTFitPiecewiseLinearRDP: N<0"); alglib.ap.assert((double)(eps)>(double)(0), "LSTFitPiecewiseLinearRDP: Eps<=0"); alglib.ap.assert(alglib.ap.len(x)>=n, "LSTFitPiecewiseLinearRDP: Length(X)=n, "LSTFitPiecewiseLinearRDP: Length(Y)0 * if given, only leading N elements of X/Y are used * if not given, automatically determined from sizes of X/Y M - number of basis functions (= polynomial_degree + 1), M>=1 OUTPUT PARAMETERS: Info- same format as in LSFitLinearW() subroutine: * Info>0 task is solved * Info<=0 an error occured: -4 means inconvergence of internal SVD P - interpolant in barycentric form. Rep - report, same format as in LSFitLinearW() subroutine. Following fields are set: * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED -- ALGLIB PROJECT -- Copyright 10.12.2009 by Bochkanov Sergey *************************************************************************/ public static void polynomialfit(double[] x, double[] y, int n, int m, ref int info, ratint.barycentricinterpolant p, polynomialfitreport rep, alglib.xparams _params) { int i = 0; double[] w = new double[0]; double[] xc = new double[0]; double[] yc = new double[0]; int[] dc = new int[0]; info = 0; alglib.ap.assert(n>0, "PolynomialFit: N<=0!"); alglib.ap.assert(m>0, "PolynomialFit: M<=0!"); alglib.ap.assert(alglib.ap.len(x)>=n, "PolynomialFit: Length(X)=n, "PolynomialFit: Length(Y)0. * if given, only leading N elements of X/Y/W are used * if not given, automatically determined from sizes of X/Y/W XC - points where polynomial values/derivatives are constrained, array[0..K-1]. 
YC - values of constraints, array[0..K-1] DC - array[0..K-1], types of constraints: * DC[i]=0 means that P(XC[i])=YC[i] * DC[i]=1 means that P'(XC[i])=YC[i] SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS K - number of constraints, 0<=K=1 OUTPUT PARAMETERS: Info- same format as in LSFitLinearW() subroutine: * Info>0 task is solved * Info<=0 an error occured: -4 means inconvergence of internal SVD -3 means inconsistent constraints P - interpolant in barycentric form. Rep - report, same format as in LSFitLinearW() subroutine. Following fields are set: * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED IMPORTANT: this subroitine doesn't calculate task's condition number for K<>0. SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: Setting constraints can lead to undesired results, like ill-conditioned behavior, or inconsistency being detected. From the other side, it allows us to improve quality of the fit. Here we summarize our experience with constrained regression splines: * even simple constraints can be inconsistent, see Wikipedia article on this subject: http://en.wikipedia.org/wiki/Birkhoff_interpolation * the greater is M (given fixed constraints), the more chances that constraints will be consistent * in the general case, consistency of constraints is NOT GUARANTEED. * in the one special cases, however, we can guarantee consistency. This case is: M>1 and constraints on the function values (NOT DERIVATIVES) Our final recommendation is to use constraints WHEN AND ONLY when you can't solve your task without them. Anything beyond special cases given above is not guaranteed and may result in inconsistency. -- ALGLIB PROJECT -- Copyright 10.12.2009 by Bochkanov Sergey *************************************************************************/ public static void polynomialfitwc(double[] x, double[] y, double[] w, int n, double[] xc, double[] yc, int[] dc, int k, int m, ref int info, ratint.barycentricinterpolant p, polynomialfitreport rep, alglib.xparams _params) { double xa = 0; double xb = 0; double sa = 0; double sb = 0; double[] xoriginal = new double[0]; double[] yoriginal = new double[0]; double[] y2 = new double[0]; double[] w2 = new double[0]; double[] tmp = new double[0]; double[] tmp2 = new double[0]; double[] bx = new double[0]; double[] by = new double[0]; double[] bw = new double[0]; int i = 0; int j = 0; double u = 0; double v = 0; double s = 0; int relcnt = 0; lsfitreport lrep = new lsfitreport(); x = (double[])x.Clone(); y = (double[])y.Clone(); w = (double[])w.Clone(); xc = (double[])xc.Clone(); yc = (double[])yc.Clone(); info = 0; alglib.ap.assert(n>0, "PolynomialFitWC: N<=0!"); alglib.ap.assert(m>0, "PolynomialFitWC: M<=0!"); alglib.ap.assert(k>=0, "PolynomialFitWC: K<0!"); alglib.ap.assert(k=M!"); alglib.ap.assert(alglib.ap.len(x)>=n, "PolynomialFitWC: Length(X)=n, "PolynomialFitWC: Length(Y)=n, "PolynomialFitWC: Length(W)=k, "PolynomialFitWC: Length(XC)=k, "PolynomialFitWC: Length(YC)=k, "PolynomialFitWC: Length(DC)=0: * zero X is correctly handled even for B<=0 * negative X results in exception. A, B, C, D- parameters of 4PL model: * A is unconstrained * B is unconstrained; zero or negative values are handled correctly. * C>0, non-positive value results in exception * D is unconstrained RESULT: model value at X NOTE: if B=0, denominator is assumed to be equal to 2.0 even for zero X (strictly speaking, 0^0 is undefined). 
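EXAMPLE (informal sketch; it calls the function exactly as it is declared
below and passes null for the trailing XParams argument):

    // 4PL model with A=1, B=2, C=1, D=0, evaluated at X=1:
    // F(1) = 0 + (1-0)/(1+(1/1)^2) = 0.5
    double v = logisticcalc4(1.0, 1.0, 2.0, 1.0, 0.0, null);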
NOTE: this function also throws exception if all input parameters are correct, but overflow was detected during calculations. NOTE: this function performs a lot of checks; if you need really high performance, consider evaluating model yourself, without checking for degenerate cases. -- ALGLIB PROJECT -- Copyright 14.05.2014 by Bochkanov Sergey *************************************************************************/ public static double logisticcalc4(double x, double a, double b, double c, double d, alglib.xparams _params) { double result = 0; alglib.ap.assert(math.isfinite(x), "LogisticCalc4: X is not finite"); alglib.ap.assert(math.isfinite(a), "LogisticCalc4: A is not finite"); alglib.ap.assert(math.isfinite(b), "LogisticCalc4: B is not finite"); alglib.ap.assert(math.isfinite(c), "LogisticCalc4: C is not finite"); alglib.ap.assert(math.isfinite(d), "LogisticCalc4: D is not finite"); alglib.ap.assert((double)(x)>=(double)(0), "LogisticCalc4: X is negative"); alglib.ap.assert((double)(c)>(double)(0), "LogisticCalc4: C is non-positive"); // // Check for degenerate cases // if( (double)(b)==(double)(0) ) { result = 0.5*(a+d); return result; } if( (double)(x)==(double)(0) ) { if( (double)(b)>(double)(0) ) { result = a; } else { result = d; } return result; } // // General case // result = d+(a-d)/(1.0+Math.Pow(x/c, b)); alglib.ap.assert(math.isfinite(result), "LogisticCalc4: overflow during calculations"); return result; } /************************************************************************* This function calculates value of five-parameter logistic (5PL) model at specified point X. 5PL model has following form: F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G) INPUT PARAMETERS: X - current point, X>=0: * zero X is correctly handled even for B<=0 * negative X results in exception. A, B, C, D, G- parameters of 5PL model: * A is unconstrained * B is unconstrained; zero or negative values are handled correctly. * C>0, non-positive value results in exception * D is unconstrained * G>0, non-positive value results in exception RESULT: model value at X NOTE: if B=0, denominator is assumed to be equal to Power(2.0,G) even for zero X (strictly speaking, 0^0 is undefined). NOTE: this function also throws exception if all input parameters are correct, but overflow was detected during calculations. NOTE: this function performs a lot of checks; if you need really high performance, consider evaluating model yourself, without checking for degenerate cases. 
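EXAMPLE (informal sketch; it calls the function exactly as it is declared
below and passes null for the trailing XParams argument):

    // 5PL model with A=1, B=2, C=1, D=0, G=2, evaluated at X=1:
    // F(1) = 0 + (1-0)/Power(1+(1/1)^2, 2) = 0.25
    double v = logisticcalc5(1.0, 1.0, 2.0, 1.0, 0.0, 2.0, null);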
-- ALGLIB PROJECT -- Copyright 14.05.2014 by Bochkanov Sergey *************************************************************************/ public static double logisticcalc5(double x, double a, double b, double c, double d, double g, alglib.xparams _params) { double result = 0; alglib.ap.assert(math.isfinite(x), "LogisticCalc5: X is not finite"); alglib.ap.assert(math.isfinite(a), "LogisticCalc5: A is not finite"); alglib.ap.assert(math.isfinite(b), "LogisticCalc5: B is not finite"); alglib.ap.assert(math.isfinite(c), "LogisticCalc5: C is not finite"); alglib.ap.assert(math.isfinite(d), "LogisticCalc5: D is not finite"); alglib.ap.assert(math.isfinite(g), "LogisticCalc5: G is not finite"); alglib.ap.assert((double)(x)>=(double)(0), "LogisticCalc5: X is negative"); alglib.ap.assert((double)(c)>(double)(0), "LogisticCalc5: C is non-positive"); alglib.ap.assert((double)(g)>(double)(0), "LogisticCalc5: G is non-positive"); // // Check for degenerate cases // if( (double)(b)==(double)(0) ) { result = d+(a-d)/Math.Pow(2.0, g); return result; } if( (double)(x)==(double)(0) ) { if( (double)(b)>(double)(0) ) { result = a; } else { result = d; } return result; } // // General case // result = d+(a-d)/Math.Pow(1.0+Math.Pow(x/c, b), g); alglib.ap.assert(math.isfinite(result), "LogisticCalc5: overflow during calculations"); return result; } /************************************************************************* This function fits four-parameter logistic (4PL) model to data provided by user. 4PL model has following form: F(x|A,B,C,D) = D+(A-D)/(1+Power(x/C,B)) Here: * A, D - unconstrained (see LogisticFit4EC() for constrained 4PL) * B>=0 * C>0 IMPORTANT: output of this function is constrained in such way that B>0. Because 4PL model is symmetric with respect to B, there is no need to explore B<0. Constraining B makes algorithm easier to stabilize and debug. Users who for some reason prefer to work with negative B's should transform output themselves (swap A and D, replace B by -B). 4PL fitting is implemented as follows: * we perform small number of restarts from random locations which helps to solve problem of bad local extrema. Locations are only partially random - we use input data to determine good initial guess, but we include controlled amount of randomness. * we perform Levenberg-Marquardt fitting with very tight constraints on parameters B and C - it allows us to find good initial guess for the second stage without risk of running into "flat spot". * second Levenberg-Marquardt round is performed without excessive constraints. Results from the previous round are used as initial guess. * after fitting is done, we compare results with best values found so far, rewrite "best solution" if needed, and move to next random location. Overall algorithm is very stable and is not prone to bad local extrema. Furthermore, it automatically scales when input data have very large or very small range. INPUT PARAMETERS: X - array[N], stores X-values. MUST include only non-negative numbers (but may include zero values). Can be unsorted. Y - array[N], values to fit. N - number of points. If N is less than length of X/Y, only leading N elements are used. OUTPUT PARAMETERS: A, B, C, D- parameters of 4PL model Rep - fitting report. 
This structure has many fields, but ONLY ONES LISTED BELOW ARE SET: * Rep.IterationsCount - number of iterations performed * Rep.RMSError - root-mean-square error * Rep.AvgError - average absolute error * Rep.AvgRelError - average relative error (calculated for non-zero Y-values) * Rep.MaxError - maximum absolute error * Rep.R2 - coefficient of determination, R-squared. This coefficient is calculated as R2=1-RSS/TSS (in case of nonlinear regression there are multiple ways to define R2, each of them giving different results). NOTE: for stability reasons the B parameter is restricted by [1/1000,1000] range. It prevents algorithm from making trial steps deep into the area of bad parameters. NOTE: after you obtained coefficients, you can evaluate model with LogisticCalc4() function. NOTE: if you need better control over fitting process than provided by this function, you may use LogisticFit45X(). NOTE: step is automatically scaled according to scale of parameters being fitted before we compare its length with EpsX. Thus, this function can be used to fit data with very small or very large values without changing EpsX. -- ALGLIB PROJECT -- Copyright 14.02.2014 by Bochkanov Sergey *************************************************************************/ public static void logisticfit4(double[] x, double[] y, int n, ref double a, ref double b, ref double c, ref double d, lsfitreport rep, alglib.xparams _params) { double g = 0; x = (double[])x.Clone(); y = (double[])y.Clone(); a = 0; b = 0; c = 0; d = 0; logisticfit45x(x, y, n, Double.NaN, Double.NaN, true, 0.0, 0.0, 0, ref a, ref b, ref c, ref d, ref g, rep, _params); } /************************************************************************* This function fits four-parameter logistic (4PL) model to data provided by user, with optional constraints on parameters A and D. 4PL model has following form: F(x|A,B,C,D) = D+(A-D)/(1+Power(x/C,B)) Here: * A, D - with optional equality constraints * B>=0 * C>0 IMPORTANT: output of this function is constrained in such way that B>0. Because 4PL model is symmetric with respect to B, there is no need to explore B<0. Constraining B makes algorithm easier to stabilize and debug. Users who for some reason prefer to work with negative B's should transform output themselves (swap A and D, replace B by -B). 4PL fitting is implemented as follows: * we perform small number of restarts from random locations which helps to solve problem of bad local extrema. Locations are only partially random - we use input data to determine good initial guess, but we include controlled amount of randomness. * we perform Levenberg-Marquardt fitting with very tight constraints on parameters B and C - it allows us to find good initial guess for the second stage without risk of running into "flat spot". * second Levenberg-Marquardt round is performed without excessive constraints. Results from the previous round are used as initial guess. * after fitting is done, we compare results with best values found so far, rewrite "best solution" if needed, and move to next random location. Overall algorithm is very stable and is not prone to bad local extrema. Furthermore, it automatically scales when input data have very large or very small range. INPUT PARAMETERS: X - array[N], stores X-values. MUST include only non-negative numbers (but may include zero values). Can be unsorted. Y - array[N], values to fit. N - number of points. If N is less than length of X/Y, only leading N elements are used. 
CnstrLeft- optional equality constraint for model value at the left boundary (at X=0). Specify NAN (Not-a-Number) if you do not need constraint on the model value at X=0 (in C++ you can pass alglib::fp_nan as parameter, in C# it will be Double.NaN). See below, section "EQUALITY CONSTRAINTS" for more information about constraints. CnstrRight- optional equality constraint for model value at X=infinity. Specify NAN (Not-a-Number) if you do not need constraint on the model value (in C++ you can pass alglib::fp_nan as parameter, in C# it will be Double.NaN). See below, section "EQUALITY CONSTRAINTS" for more information about constraints. OUTPUT PARAMETERS: A, B, C, D- parameters of 4PL model Rep - fitting report. This structure has many fields, but ONLY ONES LISTED BELOW ARE SET: * Rep.IterationsCount - number of iterations performed * Rep.RMSError - root-mean-square error * Rep.AvgError - average absolute error * Rep.AvgRelError - average relative error (calculated for non-zero Y-values) * Rep.MaxError - maximum absolute error * Rep.R2 - coefficient of determination, R-squared. This coefficient is calculated as R2=1-RSS/TSS (in case of nonlinear regression there are multiple ways to define R2, each of them giving different results). NOTE: for stability reasons the B parameter is restricted by [1/1000,1000] range. It prevents algorithm from making trial steps deep into the area of bad parameters. NOTE: after you obtained coefficients, you can evaluate model with LogisticCalc4() function. NOTE: if you need better control over fitting process than provided by this function, you may use LogisticFit45X(). NOTE: step is automatically scaled according to scale of parameters being fitted before we compare its length with EpsX. Thus, this function can be used to fit data with very small or very large values without changing EpsX. EQUALITY CONSTRAINTS ON PARAMETERS 4PL/5PL solver supports equality constraints on model values at the left boundary (X=0) and right boundary (X=infinity). These constraints are completely optional and you can specify both of them, only one - or no constraints at all. Parameter CnstrLeft contains left constraint (or NAN for unconstrained fitting), and CnstrRight contains right one. For 4PL, left constraint ALWAYS corresponds to parameter A, and right one is ALWAYS constraint on D. That's because 4PL model is normalized in such way that B>=0. -- ALGLIB PROJECT -- Copyright 14.02.2014 by Bochkanov Sergey *************************************************************************/ public static void logisticfit4ec(double[] x, double[] y, int n, double cnstrleft, double cnstrright, ref double a, ref double b, ref double c, ref double d, lsfitreport rep, alglib.xparams _params) { double g = 0; x = (double[])x.Clone(); y = (double[])y.Clone(); a = 0; b = 0; c = 0; d = 0; logisticfit45x(x, y, n, cnstrleft, cnstrright, true, 0.0, 0.0, 0, ref a, ref b, ref c, ref d, ref g, rep, _params); } /************************************************************************* This function fits five-parameter logistic (5PL) model to data provided by user. 5PL model has following form: F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G) Here: * A, D - unconstrained * B - unconstrained * C>0 * G>0 IMPORTANT: unlike in 4PL fitting, output of this function is NOT constrained in such way that B is guaranteed to be positive. Furthermore, unlike 4PL, 5PL model is NOT symmetric with respect to B, so you can NOT transform model to equivalent one, with B having desired sign (>0 or <0). 
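Editor's sketch illustrating the statement above (arbitrary values): for 4PL, swapping A and D while negating B reproduces exactly the same curve, which is why 4PL output can be normalized to B>0; for 5PL with G different from 1 the analogous swap does not, so the sign of B carries real information.

    double a = 0.1, b = 1.5, c = 3.0, d = 2.0, x = 2.5;
    // 4PL: these two parameterizations coincide for every x>0
    double f4a = d + (a-d)/(1.0 + Math.Pow(x/c,  b));
    double f4b = a + (d-a)/(1.0 + Math.Pow(x/c, -b));               // f4a == f4b (up to rounding)
    // 5PL with G=0.8: the same swap gives a different value, so no such normalization exists
    double f5a = d + (a-d)/Math.Pow(1.0 + Math.Pow(x/c,  b), 0.8);
    double f5b = a + (d-a)/Math.Pow(1.0 + Math.Pow(x/c, -b), 0.8);  // f5a != f5b in general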
5PL fitting is implemented as follows: * we perform small number of restarts from random locations which helps to solve problem of bad local extrema. Locations are only partially random - we use input data to determine good initial guess, but we include controlled amount of randomness. * we perform Levenberg-Marquardt fitting with very tight constraints on parameters B and C - it allows us to find good initial guess for the second stage without risk of running into "flat spot". Parameter G is fixed at G=1. * second Levenberg-Marquardt round is performed without excessive constraints on B and C, but with G still equal to 1. Results from the previous round are used as initial guess. * third Levenberg-Marquardt round relaxes constraints on G and tries two different models - one with B>0 and one with B<0. * after fitting is done, we compare results with best values found so far, rewrite "best solution" if needed, and move to next random location. Overall algorithm is very stable and is not prone to bad local extrema. Furthermore, it automatically scales when input data have very large or very small range. INPUT PARAMETERS: X - array[N], stores X-values. MUST include only non-negative numbers (but may include zero values). Can be unsorted. Y - array[N], values to fit. N - number of points. If N is less than length of X/Y, only leading N elements are used. OUTPUT PARAMETERS: A,B,C,D,G- parameters of 5PL model Rep - fitting report. This structure has many fields, but ONLY ONES LISTED BELOW ARE SET: * Rep.IterationsCount - number of iterations performed * Rep.RMSError - root-mean-square error * Rep.AvgError - average absolute error * Rep.AvgRelError - average relative error (calculated for non-zero Y-values) * Rep.MaxError - maximum absolute error * Rep.R2 - coefficient of determination, R-squared. This coefficient is calculated as R2=1-RSS/TSS (in case of nonlinear regression there are multiple ways to define R2, each of them giving different results). NOTE: for better stability B parameter is restricted by [+-1/1000,+-1000] range, and G is restricted by [1/10,10] range. It prevents algorithm from making trial steps deep into the area of bad parameters. NOTE: after you obtained coefficients, you can evaluate model with LogisticCalc5() function. NOTE: if you need better control over fitting process than provided by this function, you may use LogisticFit45X(). NOTE: step is automatically scaled according to scale of parameters being fitted before we compare its length with EpsX. Thus, this function can be used to fit data with very small or very large values without changing EpsX. -- ALGLIB PROJECT -- Copyright 14.02.2014 by Bochkanov Sergey *************************************************************************/ public static void logisticfit5(double[] x, double[] y, int n, ref double a, ref double b, ref double c, ref double d, ref double g, lsfitreport rep, alglib.xparams _params) { x = (double[])x.Clone(); y = (double[])y.Clone(); a = 0; b = 0; c = 0; d = 0; g = 0; logisticfit45x(x, y, n, Double.NaN, Double.NaN, false, 0.0, 0.0, 0, ref a, ref b, ref c, ref d, ref g, rep, _params); } /************************************************************************* This function fits five-parameter logistic (5PL) model to data provided by user, subject to optional equality constraints on parameters A and D. 
5PL model has following form: F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G) Here: * A, D - with optional equality constraints * B - unconstrained * C>0 * G>0 IMPORTANT: unlike in 4PL fitting, output of this function is NOT constrained in such way that B is guaranteed to be positive. Furthermore, unlike 4PL, 5PL model is NOT symmetric with respect to B, so you can NOT transform model to equivalent one, with B having desired sign (>0 or <0). 5PL fitting is implemented as follows: * we perform small number of restarts from random locations which helps to solve problem of bad local extrema. Locations are only partially random - we use input data to determine good initial guess, but we include controlled amount of randomness. * we perform Levenberg-Marquardt fitting with very tight constraints on parameters B and C - it allows us to find good initial guess for the second stage without risk of running into "flat spot". Parameter G is fixed at G=1. * second Levenberg-Marquardt round is performed without excessive constraints on B and C, but with G still equal to 1. Results from the previous round are used as initial guess. * third Levenberg-Marquardt round relaxes constraints on G and tries two different models - one with B>0 and one with B<0. * after fitting is done, we compare results with best values found so far, rewrite "best solution" if needed, and move to next random location. Overall algorithm is very stable and is not prone to bad local extrema. Furthermore, it automatically scales when input data have very large or very small range. INPUT PARAMETERS: X - array[N], stores X-values. MUST include only non-negative numbers (but may include zero values). Can be unsorted. Y - array[N], values to fit. N - number of points. If N is less than length of X/Y, only leading N elements are used. CnstrLeft- optional equality constraint for model value at the left boundary (at X=0). Specify NAN (Not-a-Number) if you do not need constraint on the model value at X=0 (in C++ you can pass alglib::fp_nan as parameter, in C# it will be Double.NaN). See below, section "EQUALITY CONSTRAINTS" for more information about constraints. CnstrRight- optional equality constraint for model value at X=infinity. Specify NAN (Not-a-Number) if you do not need constraint on the model value (in C++ you can pass alglib::fp_nan as parameter, in C# it will be Double.NaN). See below, section "EQUALITY CONSTRAINTS" for more information about constraints. OUTPUT PARAMETERS: A,B,C,D,G- parameters of 5PL model Rep - fitting report. This structure has many fields, but ONLY ONES LISTED BELOW ARE SET: * Rep.IterationsCount - number of iterations performed * Rep.RMSError - root-mean-square error * Rep.AvgError - average absolute error * Rep.AvgRelError - average relative error (calculated for non-zero Y-values) * Rep.MaxError - maximum absolute error * Rep.R2 - coefficient of determination, R-squared. This coefficient is calculated as R2=1-RSS/TSS (in case of nonlinear regression there are multiple ways to define R2, each of them giving different results). NOTE: for better stability B parameter is restricted by [+-1/1000,+-1000] range, and G is restricted by [1/10,10] range. It prevents algorithm from making trial steps deep into the area of bad parameters. NOTE: after you obtained coefficients, you can evaluate model with LogisticCalc5() function. NOTE: if you need better control over fitting process than provided by this function, you may use LogisticFit45X(). 
NOTE: step is automatically scaled according to scale of parameters being fitted before we compare its length with EpsX. Thus, this function can be used to fit data with very small or very large values without changing EpsX. EQUALITY CONSTRAINTS ON PARAMETERS 5PL solver supports equality constraints on model values at the left boundary (X=0) and right boundary (X=infinity). These constraints are completely optional and you can specify both of them, only one - or no constraints at all. Parameter CnstrLeft contains left constraint (or NAN for unconstrained fitting), and CnstrRight contains right one. Unlike 4PL one, 5PL model is NOT symmetric with respect to change in sign of B. Thus, negative B's are possible, and left constraint may constrain parameter A (for positive B's) - or parameter D (for negative B's). Similarly changes meaning of right constraint. You do not have to decide what parameter to constrain - algorithm will automatically determine correct parameters as fitting progresses. However, question highlighted above is important when you interpret fitting results. -- ALGLIB PROJECT -- Copyright 14.02.2014 by Bochkanov Sergey *************************************************************************/ public static void logisticfit5ec(double[] x, double[] y, int n, double cnstrleft, double cnstrright, ref double a, ref double b, ref double c, ref double d, ref double g, lsfitreport rep, alglib.xparams _params) { x = (double[])x.Clone(); y = (double[])y.Clone(); a = 0; b = 0; c = 0; d = 0; g = 0; logisticfit45x(x, y, n, cnstrleft, cnstrright, false, 0.0, 0.0, 0, ref a, ref b, ref c, ref d, ref g, rep, _params); } /************************************************************************* This is "expert" 4PL/5PL fitting function, which can be used if you need better control over fitting process than provided by LogisticFit4() or LogisticFit5(). This function fits model of the form F(x|A,B,C,D) = D+(A-D)/(1+Power(x/C,B)) (4PL model) or F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G) (5PL model) Here: * A, D - unconstrained * B>=0 for 4PL, unconstrained for 5PL * C>0 * G>0 (if present) INPUT PARAMETERS: X - array[N], stores X-values. MUST include only non-negative numbers (but may include zero values). Can be unsorted. Y - array[N], values to fit. N - number of points. If N is less than length of X/Y, only leading N elements are used. CnstrLeft- optional equality constraint for model value at the left boundary (at X=0). Specify NAN (Not-a-Number) if you do not need constraint on the model value at X=0 (in C++ you can pass alglib::fp_nan as parameter, in C# it will be Double.NaN). See below, section "EQUALITY CONSTRAINTS" for more information about constraints. CnstrRight- optional equality constraint for model value at X=infinity. Specify NAN (Not-a-Number) if you do not need constraint on the model value (in C++ you can pass alglib::fp_nan as parameter, in C# it will be Double.NaN). See below, section "EQUALITY CONSTRAINTS" for more information about constraints. Is4PL - whether 4PL or 5PL models are fitted LambdaV - regularization coefficient, LambdaV>=0. Set it to zero unless you know what you are doing. EpsX - stopping condition (step size), EpsX>=0. Zero value means that small step is automatically chosen. See notes below for more information. RsCnt - number of repeated restarts from random points. 4PL/5PL models are prone to problem of bad local extrema. Utilizing multiple random restarts allows us to improve algorithm convergence. RsCnt>=0. 
Zero value means that the function automatically chooses a small number of restarts (recommended). OUTPUT PARAMETERS: A, B, C, D- parameters of 4PL model G - parameter of 5PL model; for Is4PL=True, G=1 is returned. Rep - fitting report. This structure has many fields, but ONLY ONES LISTED BELOW ARE SET: * Rep.IterationsCount - number of iterations performed * Rep.RMSError - root-mean-square error * Rep.AvgError - average absolute error * Rep.AvgRelError - average relative error (calculated for non-zero Y-values) * Rep.MaxError - maximum absolute error * Rep.R2 - coefficient of determination, R-squared. This coefficient is calculated as R2=1-RSS/TSS (in case of nonlinear regression there are multiple ways to define R2, each of them giving different results). NOTE: for better stability the B parameter is restricted to the [+-1/1000,+-1000] range, and G is restricted to the [1/10,10] range. This prevents the algorithm from making trial steps deep into the area of bad parameters. NOTE: after you have obtained the coefficients, you can evaluate the model with the LogisticCalc5() function. NOTE: the step is automatically scaled according to the scale of the parameters being fitted before its length is compared with EpsX. Thus, this function can be used to fit data with very small or very large values without changing EpsX. EQUALITY CONSTRAINTS ON PARAMETERS The 4PL/5PL solver supports equality constraints on model values at the left boundary (X=0) and right boundary (X=infinity). These constraints are completely optional: you can specify both of them, only one, or no constraints at all. Parameter CnstrLeft contains the left constraint (or NAN for unconstrained fitting), and CnstrRight contains the right one. For 4PL, the left constraint ALWAYS corresponds to parameter A, and the right one is ALWAYS a constraint on D. That's because the 4PL model is normalized in such a way that B>=0. For the 5PL model things are different. Unlike the 4PL one, the 5PL model is NOT symmetric with respect to a change in the sign of B. Thus, negative B's are possible, and the left constraint may constrain parameter A (for positive B's) or parameter D (for negative B's). The meaning of the right constraint changes similarly. You do not have to decide which parameter to constrain - the algorithm will automatically determine the correct parameters as fitting progresses. However, the distinction highlighted above is important when you interpret fitting results.
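Editor's sketch of a typical call (the data values and the public out-parameter wrapper form are assumptions, shown for orientation only): fit a 5PL curve while pinning the model value at X=0 to zero and leaving the right boundary free.

    double[] x = new double[]{0.0, 0.5, 1.0, 2.0, 4.0, 8.0};
    double[] y = new double[]{0.02, 0.31, 0.80, 1.62, 2.28, 2.71};
    double a, b, c, d, g;
    alglib.lsfitreport rep;
    // CnstrLeft=0.0 pins F(0), CnstrRight=NaN leaves F(+infinity) unconstrained;
    // Is4PL=false selects 5PL; LambdaV=EpsX=0 and RsCnt=0 take the defaults
    alglib.logisticfit45x(x, y, 6, 0.0, Double.NaN, false, 0.0, 0.0, 0,
                          out a, out b, out c, out d, out g, out rep);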
-- ALGLIB PROJECT -- Copyright 14.02.2014 by Bochkanov Sergey *************************************************************************/ public static void logisticfit45x(double[] x, double[] y, int n, double cnstrleft, double cnstrright, bool is4pl, double lambdav, double epsx, int rscnt, ref double a, ref double b, ref double c, ref double d, ref double g, lsfitreport rep, alglib.xparams _params) { int i = 0; int outerit = 0; int nz = 0; double v = 0; double[] p0 = new double[0]; double[] p1 = new double[0]; double[] p2 = new double[0]; double[] bndl = new double[0]; double[] bndu = new double[0]; double[] s = new double[0]; double[] bndl1 = new double[0]; double[] bndu1 = new double[0]; double[] bndl2 = new double[0]; double[] bndu2 = new double[0]; double[,] z = new double[0,0]; hqrnd.hqrndstate rs = new hqrnd.hqrndstate(); minlm.minlmstate state = new minlm.minlmstate(); minlm.minlmreport replm = new minlm.minlmreport(); int maxits = 0; double fbest = 0; double flast = 0; double scalex = 0; double scaley = 0; double[] bufx = new double[0]; double[] bufy = new double[0]; double fposb = 0; double fnegb = 0; x = (double[])x.Clone(); y = (double[])y.Clone(); a = 0; b = 0; c = 0; d = 0; g = 0; alglib.ap.assert(math.isfinite(epsx), "LogisticFitX: EpsX is infinite/NAN"); alglib.ap.assert(math.isfinite(lambdav), "LogisticFitX: LambdaV is infinite/NAN"); alglib.ap.assert(math.isfinite(cnstrleft) || Double.IsNaN(cnstrleft), "LogisticFitX: CnstrLeft is NOT finite or NAN"); alglib.ap.assert(math.isfinite(cnstrright) || Double.IsNaN(cnstrright), "LogisticFitX: CnstrRight is NOT finite or NAN"); alglib.ap.assert((double)(lambdav)>=(double)(0), "LogisticFitX: negative LambdaV"); alglib.ap.assert(n>0, "LogisticFitX: N<=0"); alglib.ap.assert(rscnt>=0, "LogisticFitX: RsCnt<0"); alglib.ap.assert((double)(epsx)>=(double)(0), "LogisticFitX: EpsX<0"); alglib.ap.assert(alglib.ap.len(x)>=n, "LogisticFitX: Length(X)=n, "LogisticFitX: Length(Y)=(double)(0), "LogisticFitX: some X[] are negative"); nz = n; for(i=0; i<=n-1; i++) { if( (double)(x[i])>(double)(0) ) { nz = i; break; } } // // For NZ=N (all X[] are zero) special code is used. // For NZ(double)(0), "LogisticFitX: internal error"); v = 0.0; for(i=0; i<=n-1; i++) { v = v+y[i]; } v = v/n; scaley = 0.0; for(i=0; i<=n-1; i++) { scaley = scaley+math.sqr(y[i]-v); } scaley = Math.Sqrt(scaley/n); if( (double)(scaley)==(double)(0) ) { scaley = 1.0; } s = new double[5]; s[0] = scaley; s[1] = 0.1; s[2] = scalex; s[3] = scaley; s[4] = 0.1; p0 = new double[5]; p0[0] = 0; p0[1] = 0; p0[2] = 0; p0[3] = 0; p0[4] = 0; bndl = new double[5]; bndu = new double[5]; bndl1 = new double[5]; bndu1 = new double[5]; bndl2 = new double[5]; bndu2 = new double[5]; minlm.minlmcreatevj(5, n+5, p0, state, _params); minlm.minlmsetscale(state, s, _params); minlm.minlmsetcond(state, epsx, maxits, _params); minlm.minlmsetxrep(state, true, _params); p1 = new double[5]; p2 = new double[5]; // // Is it 4PL problem? 
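// (editor's comment) The two branches below differ only in how the sign of B is
// handled: the 4PL branch fixes G=1 and searches B>0 with progressively relaxed
// bounds ([0.5,2] -> [0.1,10] -> [0.01,100] -> [0.001,1000]), while the 5PL branch
// first runs tightly constrained fits for both B>0 and B<0 and then relaxes only
// the better-scoring branch.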
// if( is4pl ) { // // Run outer iterations // a = 0; b = 1; c = 1; d = 1; g = 1; fbest = math.maxrealnumber; for(outerit=0; outerit<=rscnt-1; outerit++) { // // Prepare initial point; use B>0 // if( math.isfinite(cnstrleft) ) { p1[0] = cnstrleft; } else { p1[0] = y[0]+0.15*scaley*(hqrnd.hqrnduniformr(rs, _params)-0.5); } p1[1] = 0.5+hqrnd.hqrnduniformr(rs, _params); p1[2] = x[nz+hqrnd.hqrnduniformi(rs, n-nz, _params)]; if( math.isfinite(cnstrright) ) { p1[3] = cnstrright; } else { p1[3] = y[n-1]+0.25*scaley*(hqrnd.hqrnduniformr(rs, _params)-0.5); } p1[4] = 1.0; // // Run optimization with tight constraints and increased regularization // if( math.isfinite(cnstrleft) ) { bndl[0] = cnstrleft; bndu[0] = cnstrleft; } else { bndl[0] = Double.NegativeInfinity; bndu[0] = Double.PositiveInfinity; } bndl[1] = 0.5; bndu[1] = 2.0; bndl[2] = 0.5*scalex; bndu[2] = 2.0*scalex; if( math.isfinite(cnstrright) ) { bndl[3] = cnstrright; bndu[3] = cnstrright; } else { bndl[3] = Double.NegativeInfinity; bndu[3] = Double.PositiveInfinity; } bndl[4] = 1.0; bndu[4] = 1.0; minlm.minlmsetbc(state, bndl, bndu, _params); logisticfitinternal(x, y, n, is4pl, 100*lambdav, state, replm, ref p1, ref flast, _params); rep.iterationscount = rep.iterationscount+replm.iterationscount; // // Relax constraints, run optimization one more time // bndl[1] = 0.1; bndu[1] = 10.0; bndl[2] = math.machineepsilon*scalex; bndu[2] = scalex/math.machineepsilon; minlm.minlmsetbc(state, bndl, bndu, _params); logisticfitinternal(x, y, n, is4pl, lambdav, state, replm, ref p1, ref flast, _params); rep.iterationscount = rep.iterationscount+replm.iterationscount; // // Relax constraints more, run optimization one more time // bndl[1] = 0.01; bndu[1] = 100.0; minlm.minlmsetbc(state, bndl, bndu, _params); logisticfitinternal(x, y, n, is4pl, lambdav, state, replm, ref p1, ref flast, _params); rep.iterationscount = rep.iterationscount+replm.iterationscount; // // Relax constraints ever more, run optimization one more time // bndl[1] = 0.001; bndu[1] = 1000.0; minlm.minlmsetbc(state, bndl, bndu, _params); logisticfitinternal(x, y, n, is4pl, lambdav, state, replm, ref p1, ref flast, _params); rep.iterationscount = rep.iterationscount+replm.iterationscount; // // Compare results with best value found so far. // if( (double)(flast)<(double)(fbest) ) { a = p1[0]; b = p1[1]; c = p1[2]; d = p1[3]; g = p1[4]; fbest = flast; } } logisticfit45errors(x, y, n, a, b, c, d, g, rep, _params); return; } // // Well.... we have 5PL fit, and we have to test two separate branches: // B>0 and B<0, because of asymmetry in the curve. First, we run optimization // with tight constraints two times, in order to determine better sign for B. // // Run outer iterations // a = 0; b = 1; c = 1; d = 1; g = 1; fbest = math.maxrealnumber; for(outerit=0; outerit<=rscnt-1; outerit++) { // // First, we try positive B. 
// p1[0] = y[0]+0.15*scaley*(hqrnd.hqrnduniformr(rs, _params)-0.5); p1[1] = 0.5+hqrnd.hqrnduniformr(rs, _params); p1[2] = x[nz+hqrnd.hqrnduniformi(rs, n-nz, _params)]; p1[3] = y[n-1]+0.25*scaley*(hqrnd.hqrnduniformr(rs, _params)-0.5); p1[4] = 1.0; bndl1[0] = Double.NegativeInfinity; bndu1[0] = Double.PositiveInfinity; bndl1[1] = 0.5; bndu1[1] = 2.0; bndl1[2] = 0.5*scalex; bndu1[2] = 2.0*scalex; bndl1[3] = Double.NegativeInfinity; bndu1[3] = Double.PositiveInfinity; bndl1[4] = 0.5; bndu1[4] = 2.0; if( math.isfinite(cnstrleft) ) { p1[0] = cnstrleft; bndl1[0] = cnstrleft; bndu1[0] = cnstrleft; } if( math.isfinite(cnstrright) ) { p1[3] = cnstrright; bndl1[3] = cnstrright; bndu1[3] = cnstrright; } minlm.minlmsetbc(state, bndl1, bndu1, _params); logisticfitinternal(x, y, n, is4pl, 100*lambdav, state, replm, ref p1, ref fposb, _params); rep.iterationscount = rep.iterationscount+replm.iterationscount; // // Second attempt - with negative B (constraints are still tight). // p2[0] = y[n-1]+0.15*scaley*(hqrnd.hqrnduniformr(rs, _params)-0.5); p2[1] = -(0.5+hqrnd.hqrnduniformr(rs, _params)); p2[2] = x[nz+hqrnd.hqrnduniformi(rs, n-nz, _params)]; p2[3] = y[0]+0.25*scaley*(hqrnd.hqrnduniformr(rs, _params)-0.5); p2[4] = 1.0; bndl2[0] = Double.NegativeInfinity; bndu2[0] = Double.PositiveInfinity; bndl2[1] = -2.0; bndu2[1] = -0.5; bndl2[2] = 0.5*scalex; bndu2[2] = 2.0*scalex; bndl2[3] = Double.NegativeInfinity; bndu2[3] = Double.PositiveInfinity; bndl2[4] = 0.5; bndu2[4] = 2.0; if( math.isfinite(cnstrleft) ) { p2[3] = cnstrleft; bndl2[3] = cnstrleft; bndu2[3] = cnstrleft; } if( math.isfinite(cnstrright) ) { p2[0] = cnstrright; bndl2[0] = cnstrright; bndu2[0] = cnstrright; } minlm.minlmsetbc(state, bndl2, bndu2, _params); logisticfitinternal(x, y, n, is4pl, 100*lambdav, state, replm, ref p2, ref fnegb, _params); rep.iterationscount = rep.iterationscount+replm.iterationscount; // // Select best version of B sign // if( (double)(fposb)<(double)(fnegb) ) { // // Prepare relaxed constraints assuming that B is positive // bndl1[1] = 0.1; bndu1[1] = 10.0; bndl1[2] = math.machineepsilon*scalex; bndu1[2] = scalex/math.machineepsilon; bndl1[4] = 0.1; bndu1[4] = 10.0; minlm.minlmsetbc(state, bndl1, bndu1, _params); logisticfitinternal(x, y, n, is4pl, lambdav, state, replm, ref p1, ref flast, _params); rep.iterationscount = rep.iterationscount+replm.iterationscount; // // Prepare stronger relaxation of constraints // bndl1[1] = 0.01; bndu1[1] = 100.0; minlm.minlmsetbc(state, bndl1, bndu1, _params); logisticfitinternal(x, y, n, is4pl, lambdav, state, replm, ref p1, ref flast, _params); rep.iterationscount = rep.iterationscount+replm.iterationscount; // // Prepare stronger relaxation of constraints // bndl1[1] = 0.001; bndu1[1] = 1000.0; minlm.minlmsetbc(state, bndl1, bndu1, _params); logisticfitinternal(x, y, n, is4pl, lambdav, state, replm, ref p1, ref flast, _params); rep.iterationscount = rep.iterationscount+replm.iterationscount; // // Compare results with best value found so far. 
// if( (double)(flast)<(double)(fbest) ) { a = p1[0]; b = p1[1]; c = p1[2]; d = p1[3]; g = p1[4]; fbest = flast; } } else { // // Prepare relaxed constraints assuming that B is negative // bndl2[1] = -10.0; bndu2[1] = -0.1; bndl2[2] = math.machineepsilon*scalex; bndu2[2] = scalex/math.machineepsilon; bndl2[4] = 0.1; bndu2[4] = 10.0; minlm.minlmsetbc(state, bndl2, bndu2, _params); logisticfitinternal(x, y, n, is4pl, lambdav, state, replm, ref p2, ref flast, _params); rep.iterationscount = rep.iterationscount+replm.iterationscount; // // Prepare stronger relaxation // bndl2[1] = -100.0; bndu2[1] = -0.01; minlm.minlmsetbc(state, bndl2, bndu2, _params); logisticfitinternal(x, y, n, is4pl, lambdav, state, replm, ref p2, ref flast, _params); rep.iterationscount = rep.iterationscount+replm.iterationscount; // // Prepare stronger relaxation // bndl2[1] = -1000.0; bndu2[1] = -0.001; minlm.minlmsetbc(state, bndl2, bndu2, _params); logisticfitinternal(x, y, n, is4pl, lambdav, state, replm, ref p2, ref flast, _params); rep.iterationscount = rep.iterationscount+replm.iterationscount; // // Compare results with best value found so far. // if( (double)(flast)<(double)(fbest) ) { a = p2[0]; b = p2[1]; c = p2[2]; d = p2[3]; g = p2[4]; fbest = flast; } } } logisticfit45errors(x, y, n, a, b, c, d, g, rep, _params); } /************************************************************************* Weighted rational least squares fitting using Floater-Hormann rational functions with optimal D chosen from [0,9], with constraints and individual weights. Equidistant grid with M nodes on [min(x),max(x)] is used to build basis functions. Different values of D are tried, optimal D (least WEIGHTED root mean square error) is chosen. Task is linear, so linear least squares solver is used. Complexity of this computational scheme is O(N*M^2) (mostly dominated by the least squares solver). SEE ALSO * BarycentricFitFloaterHormann(), "lightweight" fitting without individual weights and constraints. ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: X - points, array[0..N-1]. Y - function values, array[0..N-1]. W - weights, array[0..N-1] Each summand in square sum of approximation deviations from given values is multiplied by the square of corresponding weight. Fill it by 1's if you don't want to solve weighted task. N - number of points, N>0. XC - points where function values/derivatives are constrained, array[0..K-1]. YC - values of constraints, array[0..K-1] DC - array[0..K-1], types of constraints: * DC[i]=0 means that S(XC[i])=YC[i] * DC[i]=1 means that S'(XC[i])=YC[i] SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS K - number of constraints, 0<=K<M. K=0 means no constraints (XC/YC/DC are not used in such cases). M - number of basis functions ( = number_of_nodes), M>=2. OUTPUT PARAMETERS: Info- same format as in LSFitLinearWC() subroutine. * Info>0 task is solved * Info<=0 an error occurred: -4 means inconvergence of internal SVD -3 means inconsistent constraints -1 means other errors in parameters passed (N<=0, for example) B - barycentric interpolant. Rep - report, same format as in LSFitLinearWC() subroutine.
Following fields are set: * DBest best value of the D parameter * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED IMPORTANT: this subroutine doesn't calculate task's condition number for K<>0. SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: Setting constraints can lead to undesired results, like ill-conditioned behavior, or inconsistency being detected. From the other side, it allows us to improve quality of the fit. Here we summarize our experience with constrained barycentric interpolants: * excessive constraints can be inconsistent. Floater-Hormann basis functions aren't as flexible as splines (although they are very smooth). * the more evenly constraints are spread across [min(x),max(x)], the more chances that they will be consistent * the greater is M (given fixed constraints), the more chances that constraints will be consistent * in the general case, consistency of constraints IS NOT GUARANTEED. * in the several special cases, however, we CAN guarantee consistency. * one of this cases is constraints on the function VALUES at the interval boundaries. Note that consustency of the constraints on the function DERIVATIVES is NOT guaranteed (you can use in such cases cubic splines which are more flexible). * another special case is ONE constraint on the function value (OR, but not AND, derivative) anywhere in the interval Our final recommendation is to use constraints WHEN AND ONLY WHEN you can't solve your task without them. Anything beyond special cases given above is not guaranteed and may result in inconsistency. -- ALGLIB PROJECT -- Copyright 18.08.2009 by Bochkanov Sergey *************************************************************************/ public static void barycentricfitfloaterhormannwc(double[] x, double[] y, double[] w, int n, double[] xc, double[] yc, int[] dc, int k, int m, ref int info, ratint.barycentricinterpolant b, barycentricfitreport rep, alglib.xparams _params) { int d = 0; int i = 0; double wrmscur = 0; double wrmsbest = 0; ratint.barycentricinterpolant locb = new ratint.barycentricinterpolant(); barycentricfitreport locrep = new barycentricfitreport(); int locinfo = 0; info = 0; alglib.ap.assert(n>0, "BarycentricFitFloaterHormannWC: N<=0!"); alglib.ap.assert(m>0, "BarycentricFitFloaterHormannWC: M<=0!"); alglib.ap.assert(k>=0, "BarycentricFitFloaterHormannWC: K<0!"); alglib.ap.assert(k=M!"); alglib.ap.assert(alglib.ap.len(x)>=n, "BarycentricFitFloaterHormannWC: Length(X)=n, "BarycentricFitFloaterHormannWC: Length(Y)=n, "BarycentricFitFloaterHormannWC: Length(W)=k, "BarycentricFitFloaterHormannWC: Length(XC)=k, "BarycentricFitFloaterHormannWC: Length(YC)=k, "BarycentricFitFloaterHormannWC: Length(DC)0, "BarycentricFitFloaterHormannWC: unexpected result from BarycentricFitWCFixedD!"); if( locinfo>0 ) { // // Calculate weghted RMS // wrmscur = 0; for(i=0; i<=n-1; i++) { wrmscur = wrmscur+math.sqr(w[i]*(y[i]-ratint.barycentriccalc(locb, x[i], _params))); } wrmscur = Math.Sqrt(wrmscur/n); if( (double)(wrmscur)<(double)(wrmsbest) || rep.dbest<0 ) { ratint.barycentriccopy(locb, b, _params); rep.dbest = d; info = 1; rep.rmserror = locrep.rmserror; rep.avgerror = locrep.avgerror; rep.avgrelerror = locrep.avgrelerror; rep.maxerror = locrep.maxerror; rep.taskrcond = locrep.taskrcond; wrmsbest = wrmscur; } } else { if( locinfo!=-3 && info<0 ) { info = locinfo; } } } } 
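/*
Editor's note (illustrative, not part of the library): a minimal call sketch for the
weighted constrained Floater-Hormann fit above. The out-parameter wrapper shown here
is an assumption about the public C# interface; the data, the single value constraint
S(0)=0 and the choice M=4 are arbitrary.

    double[] x  = new double[]{0, 1, 2, 3, 4, 5};
    double[] y  = new double[]{0.05, 0.9, 2.1, 2.9, 4.2, 5.1};
    double[] w  = new double[]{1, 1, 1, 1, 1, 1};   // all-ones weights = unweighted fit
    double[] xc = new double[]{0.0};
    double[] yc = new double[]{0.0};
    int[]    dc = new int[]{0};                     // DC[0]=0: constrain the VALUE at XC[0]
    int info;
    alglib.barycentricinterpolant b;
    alglib.barycentricfitreport rep;
    alglib.barycentricfitfloaterhormannwc(x, y, w, 6, xc, yc, dc, 1, 4,
                                          out info, out b, out rep);
*/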
/************************************************************************* Rational least squares fitting using Floater-Hormann rational functions with optimal D chosen from [0,9]. Equidistant grid with M node on [min(x),max(x)] is used to build basis functions. Different values of D are tried, optimal D (least root mean square error) is chosen. Task is linear, so linear least squares solver is used. Complexity of this computational scheme is O(N*M^2) (mostly dominated by the least squares solver). ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: X - points, array[0..N-1]. Y - function values, array[0..N-1]. N - number of points, N>0. M - number of basis functions ( = number_of_nodes), M>=2. OUTPUT PARAMETERS: Info- same format as in LSFitLinearWC() subroutine. * Info>0 task is solved * Info<=0 an error occured: -4 means inconvergence of internal SVD -3 means inconsistent constraints B - barycentric interpolant. Rep - report, same format as in LSFitLinearWC() subroutine. Following fields are set: * DBest best value of the D parameter * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED -- ALGLIB PROJECT -- Copyright 18.08.2009 by Bochkanov Sergey *************************************************************************/ public static void barycentricfitfloaterhormann(double[] x, double[] y, int n, int m, ref int info, ratint.barycentricinterpolant b, barycentricfitreport rep, alglib.xparams _params) { double[] w = new double[0]; double[] xc = new double[0]; double[] yc = new double[0]; int[] dc = new int[0]; int i = 0; info = 0; alglib.ap.assert(n>0, "BarycentricFitFloaterHormann: N<=0!"); alglib.ap.assert(m>0, "BarycentricFitFloaterHormann: M<=0!"); alglib.ap.assert(alglib.ap.len(x)>=n, "BarycentricFitFloaterHormann: Length(X)=n, "BarycentricFitFloaterHormann: Length(Y)0 * if given, only first N elements of X/Y/W are processed * if not given, automatically determined from X/Y/W sizes XC - points where spline values/derivatives are constrained, array[0..K-1]. YC - values of constraints, array[0..K-1] DC - array[0..K-1], types of constraints: * DC[i]=0 means that S(XC[i])=YC[i] * DC[i]=1 means that S'(XC[i])=YC[i] SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS K - number of constraints (optional): * 0<=K=4. OUTPUT PARAMETERS: Info- same format as in LSFitLinearWC() subroutine. * Info>0 task is solved * Info<=0 an error occured: -4 means inconvergence of internal SVD -3 means inconsistent constraints S - spline interpolant. Rep - report, same format as in LSFitLinearWC() subroutine. Following fields are set: * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED IMPORTANT: this subroitine doesn't calculate task's condition number for K<>0. 
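Editor's sketch (the public out-parameter wrapper is an assumption; data and the single boundary-value constraint are arbitrary): the call pattern mirrors the barycentric fit above, with M>=4 basis functions.

    double[] x  = new double[]{0, 1, 2, 3, 4, 5};
    double[] y  = new double[]{0.1, 1.1, 1.9, 3.2, 3.9, 5.1};
    double[] w  = new double[]{1, 1, 1, 1, 1, 1};
    double[] xc = new double[]{0.0};
    double[] yc = new double[]{0.0};
    int[]    dc = new int[]{0};      // constrain the spline VALUE at x=0
    int info;
    alglib.spline1dinterpolant s;
    alglib.spline1dfitreport rep;
    alglib.spline1dfitcubicwc(x, y, w, 6, xc, yc, dc, 1, 4,
                              out info, out s, out rep);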
ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array. SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: Setting constraints can lead to undesired results, like ill-conditioned behavior, or inconsistency being detected. From the other side, it allows us to improve quality of the fit. Here we summarize our experience with constrained regression splines: * excessive constraints can be inconsistent. Splines are piecewise cubic functions, and it is easy to create an example, where large number of constraints concentrated in small area will result in inconsistency. Just because spline is not flexible enough to satisfy all of them. And same constraints spread across the [min(x),max(x)] will be perfectly consistent. * the more evenly constraints are spread across [min(x),max(x)], the more chances that they will be consistent * the greater is M (given fixed constraints), the more chances that constraints will be consistent * in the general case, consistency of constraints IS NOT GUARANTEED. * in the several special cases, however, we CAN guarantee consistency. * one of this cases is constraints on the function values AND/OR its derivatives at the interval boundaries. * another special case is ONE constraint on the function value (OR, but not AND, derivative) anywhere in the interval Our final recommendation is to use constraints WHEN AND ONLY WHEN you can't solve your task without them. Anything beyond special cases given above is not guaranteed and may result in inconsistency. -- ALGLIB PROJECT -- Copyright 18.08.2009 by Bochkanov Sergey *************************************************************************/ public static void spline1dfitcubicwc(double[] x, double[] y, double[] w, int n, double[] xc, double[] yc, int[] dc, int k, int m, ref int info, spline1d.spline1dinterpolant s, spline1d.spline1dfitreport rep, alglib.xparams _params) { int i = 0; info = 0; alglib.ap.assert(n>=1, "Spline1DFitCubicWC: N<1!"); alglib.ap.assert(m>=4, "Spline1DFitCubicWC: M<4!"); alglib.ap.assert(k>=0, "Spline1DFitCubicWC: K<0!"); alglib.ap.assert(k=M!"); alglib.ap.assert(alglib.ap.len(x)>=n, "Spline1DFitCubicWC: Length(X)=n, "Spline1DFitCubicWC: Length(Y)=n, "Spline1DFitCubicWC: Length(W)=k, "Spline1DFitCubicWC: Length(XC)=k, "Spline1DFitCubicWC: Length(YC)=k, "Spline1DFitCubicWC: Length(DC)0 * if given, only first N elements of X/Y/W are processed * if not given, automatically determined from X/Y/W sizes XC - points where spline values/derivatives are constrained, array[0..K-1]. YC - values of constraints, array[0..K-1] DC - array[0..K-1], types of constraints: * DC[i]=0 means that S(XC[i])=YC[i] * DC[i]=1 means that S'(XC[i])=YC[i] SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS K - number of constraints (optional): * 0<=K=4, M IS EVEN! OUTPUT PARAMETERS: Info- same format as in LSFitLinearW() subroutine: * Info>0 task is solved * Info<=0 an error occured: -4 means inconvergence of internal SVD -3 means inconsistent constraints -2 means odd M was passed (which is not supported) -1 means another errors in parameters passed (N<=0, for example) S - spline interpolant. Rep - report, same format as in LSFitLinearW() subroutine. Following fields are set: * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED IMPORTANT: this subroitine doesn't calculate task's condition number for K<>0. 
IMPORTANT: this subroitine supports only even M's ORDER OF POINTS Subroutine automatically sorts points, so caller may pass unsorted array. SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: Setting constraints can lead to undesired results, like ill-conditioned behavior, or inconsistency being detected. From the other side, it allows us to improve quality of the fit. Here we summarize our experience with constrained regression splines: * excessive constraints can be inconsistent. Splines are piecewise cubic functions, and it is easy to create an example, where large number of constraints concentrated in small area will result in inconsistency. Just because spline is not flexible enough to satisfy all of them. And same constraints spread across the [min(x),max(x)] will be perfectly consistent. * the more evenly constraints are spread across [min(x),max(x)], the more chances that they will be consistent * the greater is M (given fixed constraints), the more chances that constraints will be consistent * in the general case, consistency of constraints is NOT GUARANTEED. * in the several special cases, however, we can guarantee consistency. * one of this cases is M>=4 and constraints on the function value (AND/OR its derivative) at the interval boundaries. * another special case is M>=4 and ONE constraint on the function value (OR, BUT NOT AND, derivative) anywhere in [min(x),max(x)] Our final recommendation is to use constraints WHEN AND ONLY when you can't solve your task without them. Anything beyond special cases given above is not guaranteed and may result in inconsistency. -- ALGLIB PROJECT -- Copyright 18.08.2009 by Bochkanov Sergey *************************************************************************/ public static void spline1dfithermitewc(double[] x, double[] y, double[] w, int n, double[] xc, double[] yc, int[] dc, int k, int m, ref int info, spline1d.spline1dinterpolant s, spline1d.spline1dfitreport rep, alglib.xparams _params) { int i = 0; info = 0; alglib.ap.assert(n>=1, "Spline1DFitHermiteWC: N<1!"); alglib.ap.assert(m>=4, "Spline1DFitHermiteWC: M<4!"); alglib.ap.assert(m%2==0, "Spline1DFitHermiteWC: M is odd!"); alglib.ap.assert(k>=0, "Spline1DFitHermiteWC: K<0!"); alglib.ap.assert(k=M!"); alglib.ap.assert(alglib.ap.len(x)>=n, "Spline1DFitHermiteWC: Length(X)=n, "Spline1DFitHermiteWC: Length(Y)=n, "Spline1DFitHermiteWC: Length(W)=k, "Spline1DFitHermiteWC: Length(XC)=k, "Spline1DFitHermiteWC: Length(YC)=k, "Spline1DFitHermiteWC: Length(DC)=1, "Spline1DFitCubic: N<1!"); alglib.ap.assert(m>=4, "Spline1DFitCubic: M<4!"); alglib.ap.assert(alglib.ap.len(x)>=n, "Spline1DFitCubic: Length(X)=n, "Spline1DFitCubic: Length(Y)=1, "Spline1DFitHermite: N<1!"); alglib.ap.assert(m>=4, "Spline1DFitHermite: M<4!"); alglib.ap.assert(m%2==0, "Spline1DFitHermite: M is odd!"); alglib.ap.assert(alglib.ap.len(x)>=n, "Spline1DFitHermite: Length(X)=n, "Spline1DFitHermite: Length(Y)=1. M - number of basis functions, M>=1. OUTPUT PARAMETERS: Info - error code: * -4 internal SVD decomposition subroutine failed (very rare and for degenerate systems only) * -1 incorrect N/M were specified * 1 task is solved C - decomposition coefficients, array[0..M-1] Rep - fitting report. Following fields are set: * Rep.TaskRCond reciprocal of condition number * R2 non-adjusted coefficient of determination (non-weighted) * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). 
* AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED ERRORS IN PARAMETERS This solver also calculates different kinds of errors in parameters and fills corresponding fields of report: * Rep.CovPar covariance matrix for parameters, array[K,K]. * Rep.ErrPar errors in parameters, array[K], errpar = sqrt(diag(CovPar)) * Rep.ErrCurve vector of fit errors - standard deviations of empirical best-fit curve from "ideal" best-fit curve built with infinite number of samples, array[N]. errcurve = sqrt(diag(F*CovPar*F')), where F is functions matrix. * Rep.Noise vector of per-point estimates of noise, array[N] NOTE: noise in the data is estimated as follows: * for fitting without user-supplied weights all points are assumed to have same level of noise, which is estimated from the data * for fitting with user-supplied weights we assume that noise level in I-th point is inversely proportional to Ith weight. Coefficient of proportionality is estimated from the data. NOTE: we apply small amount of regularization when we invert squared Jacobian and calculate covariance matrix. It guarantees that algorithm won't divide by zero during inversion, but skews error estimates a bit (fractional error is about 10^-9). However, we believe that this difference is insignificant for all practical purposes except for the situation when you want to compare ALGLIB results with "reference" implementation up to the last significant digit. NOTE: covariance matrix is estimated using correction for degrees of freedom (covariances are divided by N-M instead of dividing by N). -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitlinearw(double[] y, double[] w, double[,] fmatrix, int n, int m, ref int info, ref double[] c, lsfitreport rep, alglib.xparams _params) { info = 0; c = new double[0]; alglib.ap.assert(n>=1, "LSFitLinearW: N<1!"); alglib.ap.assert(m>=1, "LSFitLinearW: M<1!"); alglib.ap.assert(alglib.ap.len(y)>=n, "LSFitLinearW: length(Y)=n, "LSFitLinearW: length(W)=n, "LSFitLinearW: rows(FMatrix)=m, "LSFitLinearW: cols(FMatrix)=1. M - number of basis functions, M>=1. K - number of constraints, 0 <= K < M K=0 corresponds to absence of constraints. OUTPUT PARAMETERS: Info - error code: * -4 internal SVD decomposition subroutine failed (very rare and for degenerate systems only) * -3 either too many constraints (M or more), degenerate constraints (some constraints are repetead twice) or inconsistent constraints were specified. * 1 task is solved C - decomposition coefficients, array[0..M-1] Rep - fitting report. Following fields are set: * R2 non-adjusted coefficient of determination (non-weighted) * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED IMPORTANT: this subroitine doesn't calculate task's condition number for K<>0. ERRORS IN PARAMETERS This solver also calculates different kinds of errors in parameters and fills corresponding fields of report: * Rep.CovPar covariance matrix for parameters, array[K,K]. * Rep.ErrPar errors in parameters, array[K], errpar = sqrt(diag(CovPar)) * Rep.ErrCurve vector of fit errors - standard deviations of empirical best-fit curve from "ideal" best-fit curve built with infinite number of samples, array[N]. errcurve = sqrt(diag(F*CovPar*F')), where F is functions matrix. 
* Rep.Noise vector of per-point estimates of noise, array[N] IMPORTANT: errors in parameters are calculated without taking into account boundary/linear constraints! Presence of constraints changes distribution of errors, but there is no easy way to account for constraints when you calculate covariance matrix. NOTE: noise in the data is estimated as follows: * for fitting without user-supplied weights all points are assumed to have same level of noise, which is estimated from the data * for fitting with user-supplied weights we assume that noise level in I-th point is inversely proportional to Ith weight. Coefficient of proportionality is estimated from the data. NOTE: we apply small amount of regularization when we invert squared Jacobian and calculate covariance matrix. It guarantees that algorithm won't divide by zero during inversion, but skews error estimates a bit (fractional error is about 10^-9). However, we believe that this difference is insignificant for all practical purposes except for the situation when you want to compare ALGLIB results with "reference" implementation up to the last significant digit. NOTE: covariance matrix is estimated using correction for degrees of freedom (covariances are divided by N-M instead of dividing by N). -- ALGLIB -- Copyright 07.09.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitlinearwc(double[] y, double[] w, double[,] fmatrix, double[,] cmatrix, int n, int m, int k, ref int info, ref double[] c, lsfitreport rep, alglib.xparams _params) { int i = 0; int j = 0; double[] tau = new double[0]; double[,] q = new double[0,0]; double[,] f2 = new double[0,0]; double[] tmp = new double[0]; double[] c0 = new double[0]; double v = 0; int i_ = 0; y = (double[])y.Clone(); cmatrix = (double[,])cmatrix.Clone(); info = 0; c = new double[0]; alglib.ap.assert(n>=1, "LSFitLinearWC: N<1!"); alglib.ap.assert(m>=1, "LSFitLinearWC: M<1!"); alglib.ap.assert(k>=0, "LSFitLinearWC: K<0!"); alglib.ap.assert(alglib.ap.len(y)>=n, "LSFitLinearWC: length(Y)=n, "LSFitLinearWC: length(W)=n, "LSFitLinearWC: rows(FMatrix)=m, "LSFitLinearWC: cols(FMatrix)=k, "LSFitLinearWC: rows(CMatrix)=m+1 || k==0, "LSFitLinearWC: cols(CMatrix)=m ) { info = -3; return; } // // Solve // if( k==0 ) { // // no constraints // lsfitlinearinternal(y, w, fmatrix, n, m, ref info, ref c, rep, _params); } else { // // First, find general form solution of constraints system: // * factorize C = L*Q // * unpack Q // * fill upper part of C with zeros (for RCond) // // We got C=C0+Q2'*y where Q2 is lower M-K rows of Q. 
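            // (editor's comment) Concretely: the constraint system CMatrix*C = b
            // (b being the last column of CMatrix) is LQ-factorized, a particular
            // solution C0 is obtained from a lower-triangular solve against L
            // followed by multiplication with the first K rows of Q, and every
            // feasible C can then be written as C = C0 + Q2'*T, where Q2 is the
            // lower M-K rows of Q. The reduced unconstrained problem in T is solved
            // below with the transformed basis F2 = F*Q2' and right side Y - F*C0.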
// ortfac.rmatrixlq(ref cmatrix, k, m, ref tau, _params); ortfac.rmatrixlqunpackq(cmatrix, k, m, tau, m, ref q, _params); for(i=0; i<=k-1; i++) { for(j=i+1; j<=m-1; j++) { cmatrix[i,j] = 0.0; } } if( (double)(rcond.rmatrixlurcondinf(cmatrix, k, _params))<(double)(1000*math.machineepsilon) ) { info = -3; return; } tmp = new double[k]; for(i=0; i<=k-1; i++) { if( i>0 ) { v = 0.0; for(i_=0; i_<=i-1;i_++) { v += cmatrix[i,i_]*tmp[i_]; } } else { v = 0; } tmp[i] = (cmatrix[i,m]-v)/cmatrix[i,i]; } c0 = new double[m]; for(i=0; i<=m-1; i++) { c0[i] = 0; } for(i=0; i<=k-1; i++) { v = tmp[i]; for(i_=0; i_<=m-1;i_++) { c0[i_] = c0[i_] + v*q[i,i_]; } } // // Second, prepare modified matrix F2 = F*Q2' and solve modified task // tmp = new double[Math.Max(n, m)+1]; f2 = new double[n, m-k]; blas.matrixvectormultiply(fmatrix, 0, n-1, 0, m-1, false, c0, 0, m-1, -1.0, ref y, 0, n-1, 1.0, _params); ablas.rmatrixgemm(n, m-k, m, 1.0, fmatrix, 0, 0, 0, q, k, 0, 1, 0.0, f2, 0, 0, _params); lsfitlinearinternal(y, w, f2, n, m-k, ref info, ref tmp, rep, _params); rep.taskrcond = -1; if( info<=0 ) { return; } // // then, convert back to original answer: C = C0 + Q2'*Y0 // c = new double[m]; for(i_=0; i_<=m-1;i_++) { c[i_] = c0[i_]; } blas.matrixvectormultiply(q, k, m-1, 0, m-1, true, tmp, 0, m-k-1, 1.0, ref c, 0, m-1, 1.0, _params); } } /************************************************************************* Linear least squares fitting. QR decomposition is used to reduce task to MxM, then triangular solver or SVD-based solver is used depending on condition number of the system. It allows to maximize speed and retain decent accuracy. IMPORTANT: if you want to perform polynomial fitting, it may be more convenient to use PolynomialFit() function. This function gives best results on polynomial problems and solves numerical stability issues which arise when you fit high-degree polynomials to your data. ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: Y - array[0..N-1] Function values in N points. FMatrix - a table of basis functions values, array[0..N-1, 0..M-1]. FMatrix[I, J] - value of J-th basis function in I-th point. N - number of points used. N>=1. M - number of basis functions, M>=1. OUTPUT PARAMETERS: Info - error code: * -4 internal SVD decomposition subroutine failed (very rare and for degenerate systems only) * 1 task is solved C - decomposition coefficients, array[0..M-1] Rep - fitting report. Following fields are set: * Rep.TaskRCond reciprocal of condition number * R2 non-adjusted coefficient of determination (non-weighted) * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED ERRORS IN PARAMETERS This solver also calculates different kinds of errors in parameters and fills corresponding fields of report: * Rep.CovPar covariance matrix for parameters, array[K,K]. 
* Rep.ErrPar errors in parameters, array[K], errpar = sqrt(diag(CovPar)) * Rep.ErrCurve vector of fit errors - standard deviations of empirical best-fit curve from "ideal" best-fit curve built with infinite number of samples, array[N]. errcurve = sqrt(diag(F*CovPar*F')), where F is functions matrix. * Rep.Noise vector of per-point estimates of noise, array[N] NOTE: noise in the data is estimated as follows: * for fitting without user-supplied weights all points are assumed to have same level of noise, which is estimated from the data * for fitting with user-supplied weights we assume that noise level in I-th point is inversely proportional to Ith weight. Coefficient of proportionality is estimated from the data. NOTE: we apply small amount of regularization when we invert squared Jacobian and calculate covariance matrix. It guarantees that algorithm won't divide by zero during inversion, but skews error estimates a bit (fractional error is about 10^-9). However, we believe that this difference is insignificant for all practical purposes except for the situation when you want to compare ALGLIB results with "reference" implementation up to the last significant digit. NOTE: covariance matrix is estimated using correction for degrees of freedom (covariances are divided by N-M instead of dividing by N). -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitlinear(double[] y, double[,] fmatrix, int n, int m, ref int info, ref double[] c, lsfitreport rep, alglib.xparams _params) { double[] w = new double[0]; int i = 0; info = 0; c = new double[0]; alglib.ap.assert(n>=1, "LSFitLinear: N<1!"); alglib.ap.assert(m>=1, "LSFitLinear: M<1!"); alglib.ap.assert(alglib.ap.len(y)>=n, "LSFitLinear: length(Y)=n, "LSFitLinear: rows(FMatrix)=m, "LSFitLinear: cols(FMatrix)=1. M - number of basis functions, M>=1. K - number of constraints, 0 <= K < M K=0 corresponds to absence of constraints. OUTPUT PARAMETERS: Info - error code: * -4 internal SVD decomposition subroutine failed (very rare and for degenerate systems only) * -3 either too many constraints (M or more), degenerate constraints (some constraints are repetead twice) or inconsistent constraints were specified. * 1 task is solved C - decomposition coefficients, array[0..M-1] Rep - fitting report. Following fields are set: * R2 non-adjusted coefficient of determination (non-weighted) * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED IMPORTANT: this subroitine doesn't calculate task's condition number for K<>0. ERRORS IN PARAMETERS This solver also calculates different kinds of errors in parameters and fills corresponding fields of report: * Rep.CovPar covariance matrix for parameters, array[K,K]. * Rep.ErrPar errors in parameters, array[K], errpar = sqrt(diag(CovPar)) * Rep.ErrCurve vector of fit errors - standard deviations of empirical best-fit curve from "ideal" best-fit curve built with infinite number of samples, array[N]. errcurve = sqrt(diag(F*CovPar*F')), where F is functions matrix. * Rep.Noise vector of per-point estimates of noise, array[N] IMPORTANT: errors in parameters are calculated without taking into account boundary/linear constraints! Presence of constraints changes distribution of errors, but there is no easy way to account for constraints when you calculate covariance matrix. 
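Editor's sketch (values arbitrary; the wrapper call is an assumption about the public C# interface): fitting y ~ C[0]+C[1]*x subject to the single equality constraint C[0]+C[1]=1, encoded as one CMatrix row whose last column holds the right-hand side.

    double[]  y  = new double[]{0.9, 1.1, 1.6, 2.2};
    double[,] fm = new double[,]{{1, 0.0}, {1, 0.5}, {1, 1.0}, {1, 1.5}};  // basis: 1 and x
    double[,] cm = new double[,]{{1, 1, 1}};                               // C0 + C1 = 1
    int info;
    double[] c;
    alglib.lsfitreport rep;
    alglib.lsfitlinearc(y, fm, cm, 4, 2, 1, out info, out c, out rep);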
NOTE: noise in the data is estimated as follows: * for fitting without user-supplied weights all points are assumed to have same level of noise, which is estimated from the data * for fitting with user-supplied weights we assume that noise level in I-th point is inversely proportional to Ith weight. Coefficient of proportionality is estimated from the data. NOTE: we apply small amount of regularization when we invert squared Jacobian and calculate covariance matrix. It guarantees that algorithm won't divide by zero during inversion, but skews error estimates a bit (fractional error is about 10^-9). However, we believe that this difference is insignificant for all practical purposes except for the situation when you want to compare ALGLIB results with "reference" implementation up to the last significant digit. NOTE: covariance matrix is estimated using correction for degrees of freedom (covariances are divided by N-M instead of dividing by N). -- ALGLIB -- Copyright 07.09.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitlinearc(double[] y, double[,] fmatrix, double[,] cmatrix, int n, int m, int k, ref int info, ref double[] c, lsfitreport rep, alglib.xparams _params) { double[] w = new double[0]; int i = 0; y = (double[])y.Clone(); info = 0; c = new double[0]; alglib.ap.assert(n>=1, "LSFitLinearC: N<1!"); alglib.ap.assert(m>=1, "LSFitLinearC: M<1!"); alglib.ap.assert(k>=0, "LSFitLinearC: K<0!"); alglib.ap.assert(alglib.ap.len(y)>=n, "LSFitLinearC: length(Y)=n, "LSFitLinearC: rows(FMatrix)=m, "LSFitLinearC: cols(FMatrix)=k, "LSFitLinearC: rows(CMatrix)=m+1 || k==0, "LSFitLinearC: cols(CMatrix)1 M - dimension of space K - number of parameters being fitted DiffStep- numerical differentiation step; should not be very small or large; large = loss of accuracy small = growth of round-off errors OUTPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 18.10.2008 by Bochkanov Sergey *************************************************************************/ public static void lsfitcreatewf(double[,] x, double[] y, double[] w, double[] c, int n, int m, int k, double diffstep, lsfitstate state, alglib.xparams _params) { int i = 0; int i_ = 0; alglib.ap.assert(n>=1, "LSFitCreateWF: N<1!"); alglib.ap.assert(m>=1, "LSFitCreateWF: M<1!"); alglib.ap.assert(k>=1, "LSFitCreateWF: K<1!"); alglib.ap.assert(alglib.ap.len(c)>=k, "LSFitCreateWF: length(C)=n, "LSFitCreateWF: length(Y)=n, "LSFitCreateWF: length(W)=n, "LSFitCreateWF: rows(X)=m, "LSFitCreateWF: cols(X)(double)(0), "LSFitCreateWF: DiffStep<=0!"); state.teststep = 0; state.diffstep = diffstep; state.npoints = n; state.nweights = n; state.wkind = 1; state.m = m; state.k = k; lsfitsetcond(state, 0.0, 0, _params); lsfitsetstpmax(state, 0.0, _params); lsfitsetxrep(state, false, _params); state.taskx = new double[n, m]; state.tasky = new double[n]; state.taskw = new double[n]; state.c = new double[k]; state.c0 = new double[k]; state.c1 = new double[k]; for(i_=0; i_<=k-1;i_++) { state.c0[i_] = c[i_]; } for(i_=0; i_<=k-1;i_++) { state.c1[i_] = c[i_]; } state.x = new double[m]; for(i_=0; i_<=n-1;i_++) { state.taskw[i_] = w[i_]; } for(i=0; i<=n-1; i++) { for(i_=0; i_<=m-1;i_++) { state.taskx[i,i_] = x[i,i_]; } state.tasky[i] = y[i]; } state.s = new double[k]; state.bndl = new double[k]; state.bndu = new double[k]; for(i=0; i<=k-1; i++) { state.s[i] = 1.0; state.bndl[i] = Double.NegativeInfinity; state.bndu[i] = Double.PositiveInfinity; } state.optalgo = 0; 
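        //
        // At this point the state object holds the defaults installed above: unit scaling
        // (S[i]=1), an unconstrained box (BndL[i]=-INF, BndU[i]=+INF), automatic stopping
        // criteria (EpsX=0, MaxIts=0) and no iteration reports. They can be overridden later
        // with LSFitSetScale(), LSFitSetBC(), LSFitSetCond() and LSFitSetXRep().
        //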
state.prevnpt = -1; state.prevalgo = -1; state.nec = 0; state.nic = 0; minlm.minlmcreatev(k, n, state.c0, diffstep, state.optstate, _params); lsfitclearrequestfields(state, _params); state.rstate.ia = new int[6+1]; state.rstate.ra = new double[8+1]; state.rstate.stage = -1; } /************************************************************************* Nonlinear least squares fitting using function values only. Combination of numerical differentiation and secant updates is used to obtain function Jacobian. Nonlinear task min(F(c)) is solved, where F(c) = (f(c,x[0])-y[0])^2 + ... + (f(c,x[n-1])-y[n-1])^2, * N is a number of points, * M is a dimension of a space points belong to, * K is a dimension of a space of parameters being fitted, * w is an N-dimensional vector of weight coefficients, * x is a set of N points, each of them is an M-dimensional vector, * c is a K-dimensional vector of parameters being fitted This subroutine uses only f(c,x[i]). INPUT PARAMETERS: X - array[0..N-1,0..M-1], points (one row = one point) Y - array[0..N-1], function values. C - array[0..K-1], initial approximation to the solution, N - number of points, N>1 M - dimension of space K - number of parameters being fitted DiffStep- numerical differentiation step; should not be very small or large; large = loss of accuracy small = growth of round-off errors OUTPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 18.10.2008 by Bochkanov Sergey *************************************************************************/ public static void lsfitcreatef(double[,] x, double[] y, double[] c, int n, int m, int k, double diffstep, lsfitstate state, alglib.xparams _params) { int i = 0; int i_ = 0; alglib.ap.assert(n>=1, "LSFitCreateF: N<1!"); alglib.ap.assert(m>=1, "LSFitCreateF: M<1!"); alglib.ap.assert(k>=1, "LSFitCreateF: K<1!"); alglib.ap.assert(alglib.ap.len(c)>=k, "LSFitCreateF: length(C)=n, "LSFitCreateF: length(Y)=n, "LSFitCreateF: rows(X)=m, "LSFitCreateF: cols(X)=n, "LSFitCreateF: rows(X)=m, "LSFitCreateF: cols(X)(double)(0), "LSFitCreateF: DiffStep<=0!"); state.teststep = 0; state.diffstep = diffstep; state.npoints = n; state.wkind = 0; state.m = m; state.k = k; lsfitsetcond(state, 0.0, 0, _params); lsfitsetstpmax(state, 0.0, _params); lsfitsetxrep(state, false, _params); state.taskx = new double[n, m]; state.tasky = new double[n]; state.c = new double[k]; state.c0 = new double[k]; state.c1 = new double[k]; for(i_=0; i_<=k-1;i_++) { state.c0[i_] = c[i_]; } for(i_=0; i_<=k-1;i_++) { state.c1[i_] = c[i_]; } state.x = new double[m]; for(i=0; i<=n-1; i++) { for(i_=0; i_<=m-1;i_++) { state.taskx[i,i_] = x[i,i_]; } state.tasky[i] = y[i]; } state.s = new double[k]; state.bndl = new double[k]; state.bndu = new double[k]; for(i=0; i<=k-1; i++) { state.s[i] = 1.0; state.bndl[i] = Double.NegativeInfinity; state.bndu[i] = Double.PositiveInfinity; } state.optalgo = 0; state.prevnpt = -1; state.prevalgo = -1; state.nec = 0; state.nic = 0; minlm.minlmcreatev(k, n, state.c0, diffstep, state.optstate, _params); lsfitclearrequestfields(state, _params); state.rstate.ia = new int[6+1]; state.rstate.ra = new double[8+1]; state.rstate.stage = -1; } /************************************************************************* Weighted nonlinear least squares fitting using gradient only. Nonlinear task min(F(c)) is solved, where F(c) = (w[0]*(f(c,x[0])-y[0]))^2 + ... 
+ (w[n-1]*(f(c,x[n-1])-y[n-1]))^2, * N is a number of points, * M is a dimension of a space points belong to, * K is a dimension of a space of parameters being fitted, * w is an N-dimensional vector of weight coefficients, * x is a set of N points, each of them is an M-dimensional vector, * c is a K-dimensional vector of parameters being fitted This subroutine uses only f(c,x[i]) and its gradient. INPUT PARAMETERS: X - array[0..N-1,0..M-1], points (one row = one point) Y - array[0..N-1], function values. W - weights, array[0..N-1] C - array[0..K-1], initial approximation to the solution, N - number of points, N>1 M - dimension of space K - number of parameters being fitted CheapFG - boolean flag, which is: * True if both function and gradient calculation complexity are less than O(M^2). An improved algorithm can be used which corresponds to FGJ scheme from MINLM unit. * False otherwise. Standard Jacibian-bases Levenberg-Marquardt algo will be used (FJ scheme). OUTPUT PARAMETERS: State - structure which stores algorithm state See also: LSFitResults LSFitCreateFG (fitting without weights) LSFitCreateWFGH (fitting using Hessian) LSFitCreateFGH (fitting using Hessian, without weights) -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitcreatewfg(double[,] x, double[] y, double[] w, double[] c, int n, int m, int k, bool cheapfg, lsfitstate state, alglib.xparams _params) { int i = 0; int i_ = 0; alglib.ap.assert(n>=1, "LSFitCreateWFG: N<1!"); alglib.ap.assert(m>=1, "LSFitCreateWFG: M<1!"); alglib.ap.assert(k>=1, "LSFitCreateWFG: K<1!"); alglib.ap.assert(alglib.ap.len(c)>=k, "LSFitCreateWFG: length(C)=n, "LSFitCreateWFG: length(Y)=n, "LSFitCreateWFG: length(W)=n, "LSFitCreateWFG: rows(X)=m, "LSFitCreateWFG: cols(X)1 M - dimension of space K - number of parameters being fitted CheapFG - boolean flag, which is: * True if both function and gradient calculation complexity are less than O(M^2). An improved algorithm can be used which corresponds to FGJ scheme from MINLM unit. * False otherwise. Standard Jacibian-bases Levenberg-Marquardt algo will be used (FJ scheme). 
OUTPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitcreatefg(double[,] x, double[] y, double[] c, int n, int m, int k, bool cheapfg, lsfitstate state, alglib.xparams _params) { int i = 0; int i_ = 0; alglib.ap.assert(n>=1, "LSFitCreateFG: N<1!"); alglib.ap.assert(m>=1, "LSFitCreateFG: M<1!"); alglib.ap.assert(k>=1, "LSFitCreateFG: K<1!"); alglib.ap.assert(alglib.ap.len(c)>=k, "LSFitCreateFG: length(C)=n, "LSFitCreateFG: length(Y)=n, "LSFitCreateFG: rows(X)=m, "LSFitCreateFG: cols(X)=n, "LSFitCreateFG: rows(X)=m, "LSFitCreateFG: cols(X)1 M - dimension of space K - number of parameters being fitted OUTPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitcreatewfgh(double[,] x, double[] y, double[] w, double[] c, int n, int m, int k, lsfitstate state, alglib.xparams _params) { int i = 0; int i_ = 0; alglib.ap.assert(n>=1, "LSFitCreateWFGH: N<1!"); alglib.ap.assert(m>=1, "LSFitCreateWFGH: M<1!"); alglib.ap.assert(k>=1, "LSFitCreateWFGH: K<1!"); alglib.ap.assert(alglib.ap.len(c)>=k, "LSFitCreateWFGH: length(C)=n, "LSFitCreateWFGH: length(Y)=n, "LSFitCreateWFGH: length(W)=n, "LSFitCreateWFGH: rows(X)=m, "LSFitCreateWFGH: cols(X)1 M - dimension of space K - number of parameters being fitted OUTPUT PARAMETERS: State - structure which stores algorithm state -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitcreatefgh(double[,] x, double[] y, double[] c, int n, int m, int k, lsfitstate state, alglib.xparams _params) { int i = 0; int i_ = 0; alglib.ap.assert(n>=1, "LSFitCreateFGH: N<1!"); alglib.ap.assert(m>=1, "LSFitCreateFGH: M<1!"); alglib.ap.assert(k>=1, "LSFitCreateFGH: K<1!"); alglib.ap.assert(alglib.ap.len(c)>=k, "LSFitCreateFGH: length(C)=n, "LSFitCreateFGH: length(Y)=n, "LSFitCreateFGH: rows(X)=m, "LSFitCreateFGH: cols(X)=0 The subroutine finishes its work if on k+1-th iteration the condition |v|<=EpsX is fulfilled, where: * |.| means Euclidian norm * v - scaled step vector, v[i]=dx[i]/s[i] * dx - ste pvector, dx=X(k+1)-X(k) * s - scaling coefficients set by LSFitSetScale() MaxIts - maximum number of iterations. If MaxIts=0, the number of iterations is unlimited. Only Levenberg-Marquardt iterations are counted (L-BFGS/CG iterations are NOT counted because their cost is very low compared to that of LM). NOTE Passing EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic stopping criterion selection (according to the scheme used by MINLM unit). -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitsetcond(lsfitstate state, double epsx, int maxits, alglib.xparams _params) { alglib.ap.assert(math.isfinite(epsx), "LSFitSetCond: EpsX is not finite!"); alglib.ap.assert((double)(epsx)>=(double)(0), "LSFitSetCond: negative EpsX!"); alglib.ap.assert(maxits>=0, "LSFitSetCond: negative MaxIts!"); state.epsx = epsx; state.maxits = maxits; } /************************************************************************* This function sets maximum step length INPUT PARAMETERS: State - structure which stores algorithm state StpMax - maximum step length, >=0. 
Set StpMax to 0.0, if you don't want to limit step length. Use this subroutine when you optimize target function which contains exp() or other fast growing functions, and optimization algorithm makes too large steps which leads to overflow. This function allows us to reject steps that are too large (and therefore expose us to the possible overflow) without actually calculating function value at the x+stp*d. NOTE: non-zero StpMax leads to moderate performance degradation because intermediate step of preconditioned L-BFGS optimization is incompatible with limits on step size. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ public static void lsfitsetstpmax(lsfitstate state, double stpmax, alglib.xparams _params) { alglib.ap.assert((double)(stpmax)>=(double)(0), "LSFitSetStpMax: StpMax<0!"); state.stpmax = stpmax; } /************************************************************************* This function turns on/off reporting. INPUT PARAMETERS: State - structure which stores algorithm state NeedXRep- whether iteration reports are needed or not When reports are needed, State.C (current parameters) and State.F (current value of fitting function) are reported. -- ALGLIB -- Copyright 15.08.2010 by Bochkanov Sergey *************************************************************************/ public static void lsfitsetxrep(lsfitstate state, bool needxrep, alglib.xparams _params) { state.xrep = needxrep; } /************************************************************************* This function sets scaling coefficients for underlying optimizer. ALGLIB optimizers use scaling matrices to test stopping conditions (step size and gradient are scaled before comparison with tolerances). Scale of the I-th variable is a translation invariant measure of: a) "how large" the variable is b) how large the step should be to make significant changes in the function Generally, scale is NOT considered to be a form of preconditioner. But LM optimizer is unique in that it uses scaling matrix both in the stopping condition tests and as Marquardt damping factor. Proper scaling is very important for the algorithm performance. It is less important for the quality of results, but still has some influence (it is easier to converge when variables are properly scaled, so premature stopping is possible when very badly scalled variables are combined with relaxed stopping conditions). INPUT PARAMETERS: State - structure stores algorithm state S - array[N], non-zero scaling coefficients S[i] may be negative, sign doesn't matter. -- ALGLIB -- Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ public static void lsfitsetscale(lsfitstate state, double[] s, alglib.xparams _params) { int i = 0; alglib.ap.assert(alglib.ap.len(s)>=state.k, "LSFitSetScale: Length(S)=k, "LSFitSetBC: Length(BndL)=k, "LSFitSetBC: Length(BndU)BndU[i]"); } state.bndl[i] = bndl[i]; state.bndu[i] = bndu[i]; } } /************************************************************************* This function sets linear constraints for underlying optimizer Linear constraints are inactive by default (after initial creation). They are preserved until explicitly turned off with another SetLC() call. INPUT PARAMETERS: State - structure stores algorithm state C - linear constraints, array[K,N+1]. 
Each row of C represents one constraint, either equality or inequality (see below): * first N elements correspond to coefficients, * last element corresponds to the right part. All elements of C (including right part) must be finite. CT - type of constraints, array[K]: * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] K - number of equality/inequality constraints, K>=0: * if given, only leading K elements of C/CT are used * if not given, automatically determined from sizes of C/CT IMPORTANT: if you have linear constraints, it is strongly recommended to set scale of variables with lsfitsetscale(). QP solver which is used to calculate linearly constrained steps heavily relies on good scaling of input problems. NOTE: linear (non-box) constraints are satisfied only approximately - there always exists some violation due to numerical errors and algorithmic limitations. NOTE: general linear constraints add significant overhead to solution process. Although solver performs roughly same amount of iterations (when compared with similar box-only constrained problem), each iteration now involves solution of linearly constrained QP subproblem, which requires ~3-5 times more Cholesky decompositions. Thus, if you can reformulate your problem in such way this it has only box constraints, it may be beneficial to do so. -- ALGLIB -- Copyright 29.04.2017 by Bochkanov Sergey *************************************************************************/ public static void lsfitsetlc(lsfitstate state, double[,] c, int[] ct, int k, alglib.xparams _params) { int i = 0; int n = 0; int i_ = 0; n = state.k; // // First, check for errors in the inputs // alglib.ap.assert(k>=0, "LSFitSetLC: K<0"); alglib.ap.assert(alglib.ap.cols(c)>=n+1 || k==0, "LSFitSetLC: Cols(C)=k, "LSFitSetLC: Rows(C)=k, "LSFitSetLC: Length(CT)0 ) { for(i_=0; i_<=n;i_++) { state.cleic[state.nec+state.nic,i_] = -c[i,i_]; } } else { for(i_=0; i_<=n;i_++) { state.cleic[state.nec+state.nic,i_] = c[i,i_]; } } state.nic = state.nic+1; } } } /************************************************************************* NOTES: 1. this algorithm is somewhat unusual because it works with parameterized function f(C,X), where X is a function argument (we have many points which are characterized by different argument values), and C is a parameter to fit. For example, if we want to do linear fit by f(c0,c1,x) = c0*x+c1, then x will be argument, and {c0,c1} will be parameters. It is important to understand that this algorithm finds minimum in the space of function PARAMETERS (not arguments), so it needs derivatives of f() with respect to C, not X. In the example above it will need f=c0*x+c1 and {df/dc0,df/dc1} = {x,1} instead of {df/dx} = {c0}. 2. Callback functions accept C as the first parameter, and X as the second 3. If state was created with LSFitCreateFG(), algorithm needs just function and its gradient, but if state was created with LSFitCreateFGH(), algorithm will need function, gradient and Hessian. According to the said above, there ase several versions of this function, which accept different sets of callbacks. This flexibility opens way to subtle errors - you may create state with LSFitCreateFGH() (optimization using Hessian), but call function which does not accept Hessian. So when algorithm will request Hessian, there will be no callback to call. In this case exception will be thrown. 
Be careful to avoid such errors because there is no way to find them at compile time - you can see them at runtime only. -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static bool lsfititeration(lsfitstate state, alglib.xparams _params) { bool result = new bool(); double lx = 0; double lf = 0; double ld = 0; double rx = 0; double rf = 0; double rd = 0; int n = 0; int m = 0; int k = 0; double v = 0; double vv = 0; double relcnt = 0; int i = 0; int j = 0; int j1 = 0; int info = 0; int i_ = 0; // // Reverse communication preparations // I know it looks ugly, but it works the same way // anywhere from C++ to Python. // // This code initializes locals by: // * random values determined during code // generation - on first subroutine call // * values from previous call - on subsequent calls // if( state.rstate.stage>=0 ) { n = state.rstate.ia[0]; m = state.rstate.ia[1]; k = state.rstate.ia[2]; i = state.rstate.ia[3]; j = state.rstate.ia[4]; j1 = state.rstate.ia[5]; info = state.rstate.ia[6]; lx = state.rstate.ra[0]; lf = state.rstate.ra[1]; ld = state.rstate.ra[2]; rx = state.rstate.ra[3]; rf = state.rstate.ra[4]; rd = state.rstate.ra[5]; v = state.rstate.ra[6]; vv = state.rstate.ra[7]; relcnt = state.rstate.ra[8]; } else { n = 359; m = -58; k = -919; i = -909; j = 81; j1 = 255; info = 74; lx = -788; lf = 809; ld = 205; rx = -838; rf = 939; rd = -526; v = 763; vv = -541; relcnt = -698; } if( state.rstate.stage==0 ) { goto lbl_0; } if( state.rstate.stage==1 ) { goto lbl_1; } if( state.rstate.stage==2 ) { goto lbl_2; } if( state.rstate.stage==3 ) { goto lbl_3; } if( state.rstate.stage==4 ) { goto lbl_4; } if( state.rstate.stage==5 ) { goto lbl_5; } if( state.rstate.stage==6 ) { goto lbl_6; } if( state.rstate.stage==7 ) { goto lbl_7; } if( state.rstate.stage==8 ) { goto lbl_8; } if( state.rstate.stage==9 ) { goto lbl_9; } if( state.rstate.stage==10 ) { goto lbl_10; } if( state.rstate.stage==11 ) { goto lbl_11; } if( state.rstate.stage==12 ) { goto lbl_12; } if( state.rstate.stage==13 ) { goto lbl_13; } // // Routine body // // // Init // if( state.wkind==1 ) { alglib.ap.assert(state.npoints==state.nweights, "LSFitFit: number of points is not equal to the number of weights"); } state.repvaridx = -1; n = state.npoints; m = state.m; k = state.k; apserv.ivectorsetlengthatleast(ref state.tmpct, state.nec+state.nic, _params); for(i=0; i<=state.nec-1; i++) { state.tmpct[i] = 0; } for(i=0; i<=state.nic-1; i++) { state.tmpct[state.nec+i] = -1; } minlm.minlmsetcond(state.optstate, state.epsx, state.maxits, _params); minlm.minlmsetstpmax(state.optstate, state.stpmax, _params); minlm.minlmsetxrep(state.optstate, state.xrep, _params); minlm.minlmsetscale(state.optstate, state.s, _params); minlm.minlmsetbc(state.optstate, state.bndl, state.bndu, _params); minlm.minlmsetlc(state.optstate, state.cleic, state.tmpct, state.nec+state.nic, _params); // // Check that user-supplied gradient is correct // lsfitclearrequestfields(state, _params); if( !((double)(state.teststep)>(double)(0) && state.optalgo==1) ) { goto lbl_14; } for(i=0; i<=k-1; i++) { state.c[i] = state.c0[i]; if( math.isfinite(state.bndl[i]) ) { state.c[i] = Math.Max(state.c[i], state.bndl[i]); } if( math.isfinite(state.bndu[i]) ) { state.c[i] = Math.Min(state.c[i], state.bndu[i]); } } state.needfg = true; i = 0; lbl_16: if( i>k-1 ) { goto lbl_18; } alglib.ap.assert((double)(state.bndl[i])<=(double)(state.c[i]) && (double)(state.c[i])<=(double)(state.bndu[i]), 
"LSFitIteration: internal error(State.C is out of bounds)"); v = state.c[i]; j = 0; lbl_19: if( j>n-1 ) { goto lbl_21; } for(i_=0; i_<=m-1;i_++) { state.x[i_] = state.taskx[j,i_]; } state.c[i] = v-state.teststep*state.s[i]; if( math.isfinite(state.bndl[i]) ) { state.c[i] = Math.Max(state.c[i], state.bndl[i]); } lx = state.c[i]; state.rstate.stage = 0; goto lbl_rcomm; lbl_0: lf = state.f; ld = state.g[i]; state.c[i] = v+state.teststep*state.s[i]; if( math.isfinite(state.bndu[i]) ) { state.c[i] = Math.Min(state.c[i], state.bndu[i]); } rx = state.c[i]; state.rstate.stage = 1; goto lbl_rcomm; lbl_1: rf = state.f; rd = state.g[i]; state.c[i] = (lx+rx)/2; if( math.isfinite(state.bndl[i]) ) { state.c[i] = Math.Max(state.c[i], state.bndl[i]); } if( math.isfinite(state.bndu[i]) ) { state.c[i] = Math.Min(state.c[i], state.bndu[i]); } state.rstate.stage = 2; goto lbl_rcomm; lbl_2: state.c[i] = v; if( !optserv.derivativecheck(lf, ld, rf, rd, state.f, state.g[i], rx-lx, _params) ) { state.repvaridx = i; state.repterminationtype = -7; result = false; return result; } j = j+1; goto lbl_19; lbl_21: i = i+1; goto lbl_16; lbl_18: state.needfg = false; lbl_14: // // Fill WCur by weights: // * for WKind=0 unit weights are chosen // * for WKind=1 we use user-supplied weights stored in State.TaskW // apserv.rvectorsetlengthatleast(ref state.wcur, n, _params); for(i=0; i<=n-1; i++) { state.wcur[i] = 1.0; if( state.wkind==1 ) { state.wcur[i] = state.taskw[i]; } } // // Optimize // lbl_22: if( !minlm.minlmiteration(state.optstate, _params) ) { goto lbl_23; } if( !state.optstate.needfi ) { goto lbl_24; } // // calculate f[] = wi*(f(xi,c)-yi) // i = 0; lbl_26: if( i>n-1 ) { goto lbl_28; } for(i_=0; i_<=k-1;i_++) { state.c[i_] = state.optstate.x[i_]; } for(i_=0; i_<=m-1;i_++) { state.x[i_] = state.taskx[i,i_]; } state.pointindex = i; lsfitclearrequestfields(state, _params); state.needf = true; state.rstate.stage = 3; goto lbl_rcomm; lbl_3: state.needf = false; vv = state.wcur[i]; state.optstate.fi[i] = vv*(state.f-state.tasky[i]); i = i+1; goto lbl_26; lbl_28: goto lbl_22; lbl_24: if( !state.optstate.needf ) { goto lbl_29; } // // calculate F = sum (wi*(f(xi,c)-yi))^2 // state.optstate.f = 0; i = 0; lbl_31: if( i>n-1 ) { goto lbl_33; } for(i_=0; i_<=k-1;i_++) { state.c[i_] = state.optstate.x[i_]; } for(i_=0; i_<=m-1;i_++) { state.x[i_] = state.taskx[i,i_]; } state.pointindex = i; lsfitclearrequestfields(state, _params); state.needf = true; state.rstate.stage = 4; goto lbl_rcomm; lbl_4: state.needf = false; vv = state.wcur[i]; state.optstate.f = state.optstate.f+math.sqr(vv*(state.f-state.tasky[i])); i = i+1; goto lbl_31; lbl_33: goto lbl_22; lbl_29: if( !state.optstate.needfg ) { goto lbl_34; } // // calculate F/gradF // state.optstate.f = 0; for(i=0; i<=k-1; i++) { state.optstate.g[i] = 0; } i = 0; lbl_36: if( i>n-1 ) { goto lbl_38; } for(i_=0; i_<=k-1;i_++) { state.c[i_] = state.optstate.x[i_]; } for(i_=0; i_<=m-1;i_++) { state.x[i_] = state.taskx[i,i_]; } state.pointindex = i; lsfitclearrequestfields(state, _params); state.needfg = true; state.rstate.stage = 5; goto lbl_rcomm; lbl_5: state.needfg = false; vv = state.wcur[i]; state.optstate.f = state.optstate.f+math.sqr(vv*(state.f-state.tasky[i])); v = math.sqr(vv)*2*(state.f-state.tasky[i]); for(i_=0; i_<=k-1;i_++) { state.optstate.g[i_] = state.optstate.g[i_] + v*state.g[i_]; } i = i+1; goto lbl_36; lbl_38: goto lbl_22; lbl_34: if( !state.optstate.needfij ) { goto lbl_39; } // // calculate Fi/jac(Fi) // i = 0; lbl_41: if( i>n-1 ) { goto lbl_43; } for(i_=0; 
i_<=k-1;i_++) { state.c[i_] = state.optstate.x[i_]; } for(i_=0; i_<=m-1;i_++) { state.x[i_] = state.taskx[i,i_]; } state.pointindex = i; lsfitclearrequestfields(state, _params); state.needfg = true; state.rstate.stage = 6; goto lbl_rcomm; lbl_6: state.needfg = false; vv = state.wcur[i]; state.optstate.fi[i] = vv*(state.f-state.tasky[i]); for(i_=0; i_<=k-1;i_++) { state.optstate.j[i,i_] = vv*state.g[i_]; } i = i+1; goto lbl_41; lbl_43: goto lbl_22; lbl_39: if( !state.optstate.needfgh ) { goto lbl_44; } // // calculate F/grad(F)/hess(F) // state.optstate.f = 0; for(i=0; i<=k-1; i++) { state.optstate.g[i] = 0; } for(i=0; i<=k-1; i++) { for(j=0; j<=k-1; j++) { state.optstate.h[i,j] = 0; } } i = 0; lbl_46: if( i>n-1 ) { goto lbl_48; } for(i_=0; i_<=k-1;i_++) { state.c[i_] = state.optstate.x[i_]; } for(i_=0; i_<=m-1;i_++) { state.x[i_] = state.taskx[i,i_]; } state.pointindex = i; lsfitclearrequestfields(state, _params); state.needfgh = true; state.rstate.stage = 7; goto lbl_rcomm; lbl_7: state.needfgh = false; vv = state.wcur[i]; state.optstate.f = state.optstate.f+math.sqr(vv*(state.f-state.tasky[i])); v = math.sqr(vv)*2*(state.f-state.tasky[i]); for(i_=0; i_<=k-1;i_++) { state.optstate.g[i_] = state.optstate.g[i_] + v*state.g[i_]; } for(j=0; j<=k-1; j++) { v = 2*math.sqr(vv)*state.g[j]; for(i_=0; i_<=k-1;i_++) { state.optstate.h[j,i_] = state.optstate.h[j,i_] + v*state.g[i_]; } v = 2*math.sqr(vv)*(state.f-state.tasky[i]); for(i_=0; i_<=k-1;i_++) { state.optstate.h[j,i_] = state.optstate.h[j,i_] + v*state.h[j,i_]; } } i = i+1; goto lbl_46; lbl_48: goto lbl_22; lbl_44: if( !state.optstate.xupdated ) { goto lbl_49; } // // Report new iteration // for(i_=0; i_<=k-1;i_++) { state.c[i_] = state.optstate.x[i_]; } state.f = state.optstate.f; lsfitclearrequestfields(state, _params); state.xupdated = true; state.rstate.stage = 8; goto lbl_rcomm; lbl_8: state.xupdated = false; goto lbl_22; lbl_49: goto lbl_22; lbl_23: // // Extract results // // NOTE: reverse communication protocol used by this unit does NOT // allow us to reallocate State.C[] array. Thus, we extract // results to the temporary variable in order to avoid possible // reallocation. // minlm.minlmresults(state.optstate, ref state.c1, state.optrep, _params); state.repterminationtype = state.optrep.terminationtype; state.repiterationscount = state.optrep.iterationscount; // // calculate errors // if( state.repterminationtype<=0 ) { goto lbl_51; } // // Calculate RMS/Avg/Max/... 
errors // state.reprmserror = 0; state.repwrmserror = 0; state.repavgerror = 0; state.repavgrelerror = 0; state.repmaxerror = 0; relcnt = 0; i = 0; lbl_53: if( i>n-1 ) { goto lbl_55; } for(i_=0; i_<=k-1;i_++) { state.c[i_] = state.c1[i_]; } for(i_=0; i_<=m-1;i_++) { state.x[i_] = state.taskx[i,i_]; } state.pointindex = i; lsfitclearrequestfields(state, _params); state.needf = true; state.rstate.stage = 9; goto lbl_rcomm; lbl_9: state.needf = false; v = state.f; vv = state.wcur[i]; state.reprmserror = state.reprmserror+math.sqr(v-state.tasky[i]); state.repwrmserror = state.repwrmserror+math.sqr(vv*(v-state.tasky[i])); state.repavgerror = state.repavgerror+Math.Abs(v-state.tasky[i]); if( (double)(state.tasky[i])!=(double)(0) ) { state.repavgrelerror = state.repavgrelerror+Math.Abs(v-state.tasky[i])/Math.Abs(state.tasky[i]); relcnt = relcnt+1; } state.repmaxerror = Math.Max(state.repmaxerror, Math.Abs(v-state.tasky[i])); i = i+1; goto lbl_53; lbl_55: state.reprmserror = Math.Sqrt(state.reprmserror/n); state.repwrmserror = Math.Sqrt(state.repwrmserror/n); state.repavgerror = state.repavgerror/n; if( (double)(relcnt)!=(double)(0) ) { state.repavgrelerror = state.repavgrelerror/relcnt; } // // Calculate covariance matrix // apserv.rmatrixsetlengthatleast(ref state.tmpjac, n, k, _params); apserv.rvectorsetlengthatleast(ref state.tmpf, n, _params); apserv.rvectorsetlengthatleast(ref state.tmp, k, _params); if( (double)(state.diffstep)<=(double)(0) ) { goto lbl_56; } // // Compute Jacobian by means of numerical differentiation // lsfitclearrequestfields(state, _params); state.needf = true; i = 0; lbl_58: if( i>n-1 ) { goto lbl_60; } for(i_=0; i_<=m-1;i_++) { state.x[i_] = state.taskx[i,i_]; } state.pointindex = i; state.rstate.stage = 10; goto lbl_rcomm; lbl_10: state.tmpf[i] = state.f; j = 0; lbl_61: if( j>k-1 ) { goto lbl_63; } v = state.c[j]; lx = v-state.diffstep*state.s[j]; state.c[j] = lx; if( math.isfinite(state.bndl[j]) ) { state.c[j] = Math.Max(state.c[j], state.bndl[j]); } state.rstate.stage = 11; goto lbl_rcomm; lbl_11: lf = state.f; rx = v+state.diffstep*state.s[j]; state.c[j] = rx; if( math.isfinite(state.bndu[j]) ) { state.c[j] = Math.Min(state.c[j], state.bndu[j]); } state.rstate.stage = 12; goto lbl_rcomm; lbl_12: rf = state.f; state.c[j] = v; if( (double)(rx)!=(double)(lx) ) { state.tmpjac[i,j] = (rf-lf)/(rx-lx); } else { state.tmpjac[i,j] = 0; } j = j+1; goto lbl_61; lbl_63: i = i+1; goto lbl_58; lbl_60: state.needf = false; goto lbl_57; lbl_56: // // Jacobian is calculated with user-provided analytic gradient // lsfitclearrequestfields(state, _params); state.needfg = true; i = 0; lbl_64: if( i>n-1 ) { goto lbl_66; } for(i_=0; i_<=m-1;i_++) { state.x[i_] = state.taskx[i,i_]; } state.pointindex = i; state.rstate.stage = 13; goto lbl_rcomm; lbl_13: state.tmpf[i] = state.f; for(j=0; j<=k-1; j++) { state.tmpjac[i,j] = state.g[j]; } i = i+1; goto lbl_64; lbl_66: state.needfg = false; lbl_57: for(i=0; i<=k-1; i++) { state.tmp[i] = 0.0; } estimateerrors(state.tmpjac, state.tmpf, state.tasky, state.wcur, state.tmp, state.s, n, k, state.rep, ref state.tmpjacw, 0, _params); lbl_51: result = false; return result; // // Saving state // lbl_rcomm: result = true; state.rstate.ia[0] = n; state.rstate.ia[1] = m; state.rstate.ia[2] = k; state.rstate.ia[3] = i; state.rstate.ia[4] = j; state.rstate.ia[5] = j1; state.rstate.ia[6] = info; state.rstate.ra[0] = lx; state.rstate.ra[1] = lf; state.rstate.ra[2] = ld; state.rstate.ra[3] = rx; state.rstate.ra[4] = rf; state.rstate.ra[5] = rd; 
state.rstate.ra[6] = v; state.rstate.ra[7] = vv; state.rstate.ra[8] = relcnt; return result; } /************************************************************************* Nonlinear least squares fitting results. Called after return from LSFitFit(). INPUT PARAMETERS: State - algorithm state OUTPUT PARAMETERS: Info - completion code: * -8 optimizer detected NAN/INF in the target function and/or gradient * -7 gradient verification failed. See LSFitSetGradientCheck() for more information. * -3 inconsistent constraints * 2 relative step is no more than EpsX. * 5 MaxIts steps was taken * 7 stopping conditions are too stringent, further improvement is impossible C - array[0..K-1], solution Rep - optimization report. On success following fields are set: * R2 non-adjusted coefficient of determination (non-weighted) * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED * WRMSError weighted rms error on the (X,Y). ERRORS IN PARAMETERS This solver also calculates different kinds of errors in parameters and fills corresponding fields of report: * Rep.CovPar covariance matrix for parameters, array[K,K]. * Rep.ErrPar errors in parameters, array[K], errpar = sqrt(diag(CovPar)) * Rep.ErrCurve vector of fit errors - standard deviations of empirical best-fit curve from "ideal" best-fit curve built with infinite number of samples, array[N]. errcurve = sqrt(diag(J*CovPar*J')), where J is Jacobian matrix. * Rep.Noise vector of per-point estimates of noise, array[N] IMPORTANT: errors in parameters are calculated without taking into account boundary/linear constraints! Presence of constraints changes distribution of errors, but there is no easy way to account for constraints when you calculate covariance matrix. NOTE: noise in the data is estimated as follows: * for fitting without user-supplied weights all points are assumed to have same level of noise, which is estimated from the data * for fitting with user-supplied weights we assume that noise level in I-th point is inversely proportional to Ith weight. Coefficient of proportionality is estimated from the data. NOTE: we apply small amount of regularization when we invert squared Jacobian and calculate covariance matrix. It guarantees that algorithm won't divide by zero during inversion, but skews error estimates a bit (fractional error is about 10^-9). However, we believe that this difference is insignificant for all practical purposes except for the situation when you want to compare ALGLIB results with "reference" implementation up to the last significant digit. NOTE: covariance matrix is estimated using correction for degrees of freedom (covariances are divided by N-M instead of dividing by N). 
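
ILLUSTRATIVE USAGE SKETCH (informal; it assumes the standard public C# wrappers
lsfitcreatewf(), lsfitsetcond(), lsfitfit() and lsfitresults() declared elsewhere in this
file, and a hypothetical model callback expmodel() for f(c,x) = c[0]*exp(c[1]*x)):

    static void expmodel(double[] c, double[] x, ref double func, object obj)
    {
        // value of the model at point x[] for parameters c[]
        func = c[0]*System.Math.Exp(c[1]*x[0]);
    }

    double[,] x = new double[,]{{-1.0},{-0.5},{0.0},{0.5},{1.0}};
    double[] y = new double[]{0.3679, 0.6065, 1.0000, 1.6487, 2.7183};
    double[] w = new double[]{1, 1, 1, 1, 1};
    double[] c = new double[]{0.5, 0.5};
    int info;
    alglib.lsfitstate state;
    alglib.lsfitreport rep;
    alglib.lsfitcreatewf(x, y, w, c, 0.0001, out state);   // numerical differentiation, step 1E-4
    alglib.lsfitsetcond(state, 1.0E-6, 0);                 // EpsX=1E-6, unlimited iterations
    alglib.lsfitfit(state, expmodel, null, null);
    alglib.lsfitresults(state, out info, out c, out rep);
    // on success Info>0 and C is close to {1.0, 1.0}; RMSError, R2, ErrPar[] and the other
    // report fields described above are filled by the solver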
-- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ public static void lsfitresults(lsfitstate state, ref int info, ref double[] c, lsfitreport rep, alglib.xparams _params) { int i = 0; int j = 0; int i_ = 0; info = 0; c = new double[0]; clearreport(rep, _params); info = state.repterminationtype; rep.varidx = state.repvaridx; if( info>0 ) { c = new double[state.k]; for(i_=0; i_<=state.k-1;i_++) { c[i_] = state.c1[i_]; } rep.rmserror = state.reprmserror; rep.wrmserror = state.repwrmserror; rep.avgerror = state.repavgerror; rep.avgrelerror = state.repavgrelerror; rep.maxerror = state.repmaxerror; rep.iterationscount = state.repiterationscount; rep.covpar = new double[state.k, state.k]; rep.errpar = new double[state.k]; rep.errcurve = new double[state.npoints]; rep.noise = new double[state.npoints]; rep.r2 = state.rep.r2; for(i=0; i<=state.k-1; i++) { for(j=0; j<=state.k-1; j++) { rep.covpar[i,j] = state.rep.covpar[i,j]; } rep.errpar[i] = state.rep.errpar[i]; } for(i=0; i<=state.npoints-1; i++) { rep.errcurve[i] = state.rep.errcurve[i]; rep.noise[i] = state.rep.noise[i]; } } } /************************************************************************* This subroutine turns on verification of the user-supplied analytic gradient: * user calls this subroutine before fitting begins * LSFitFit() is called * prior to actual fitting, for each point in data set X_i and each component of parameters being fited C_j algorithm performs following steps: * two trial steps are made to C_j-TestStep*S[j] and C_j+TestStep*S[j], where C_j is j-th parameter and S[j] is a scale of j-th parameter * if needed, steps are bounded with respect to constraints on C[] * F(X_i|C) is evaluated at these trial points * we perform one more evaluation in the middle point of the interval * we build cubic model using function values and derivatives at trial points and we compare its prediction with actual value in the middle point * in case difference between prediction and actual value is higher than some predetermined threshold, algorithm stops with completion code -7; Rep.VarIdx is set to index of the parameter with incorrect derivative. * after verification is over, algorithm proceeds to the actual optimization. NOTE 1: verification needs N*K (points count * parameters count) gradient evaluations. It is very costly and you should use it only for low dimensional problems, when you want to be sure that you've correctly calculated analytic derivatives. You should not use it in the production code (unless you want to check derivatives provided by some third party). NOTE 2: you should carefully choose TestStep. Value which is too large (so large that function behaviour is significantly non-cubic) will lead to false alarms. You may use different step for different parameters by means of setting scale with LSFitSetScale(). NOTE 3: this function may lead to false positives. In case it reports that I-th derivative was calculated incorrectly, you may decrease test step and try one more time - maybe your function changes too sharply and your step is too large for such rapidly chanding function. NOTE 4: this function works only for optimizers created with LSFitCreateWFG() or LSFitCreateFG() constructors. 
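
ILLUSTRATIVE SKETCH (informal; assumes the public wrappers lsfitsetgradientcheck(),
lsfitfit() and lsfitresults() declared elsewhere in this file, and hypothetical user
callbacks modelfunc/modelgrad):

    alglib.lsfitsetgradientcheck(state, 0.001);               // TestStep=1E-3
    alglib.lsfitfit(state, modelfunc, modelgrad, null, null);
    alglib.lsfitresults(state, out info, out c, out rep);
    if( info==-7 )
    {
        // verification failed; Rep.VarIdx points at the suspicious parameter
        System.Console.WriteLine("check derivative w.r.t. C[" + rep.varidx + "]");
    }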
INPUT PARAMETERS: State - structure used to store algorithm state TestStep - verification step: * TestStep=0 turns verification off * TestStep>0 activates verification -- ALGLIB -- Copyright 15.06.2012 by Bochkanov Sergey *************************************************************************/ public static void lsfitsetgradientcheck(lsfitstate state, double teststep, alglib.xparams _params) { alglib.ap.assert(math.isfinite(teststep), "LSFitSetGradientCheck: TestStep contains NaN or Infinite"); alglib.ap.assert((double)(teststep)>=(double)(0), "LSFitSetGradientCheck: invalid argument TestStep(TestStep<0)"); state.teststep = teststep; } /************************************************************************* This function analyzes section of curve for processing by RDP algorithm: given set of points X,Y with indexes [I0,I1] it returns point with worst deviation from linear model (non-parametric version which sees curve as Y(x)). Input parameters: X, Y - SORTED arrays. I0,I1 - interval (boundaries included) to process Eps - desired precision OUTPUT PARAMETERS: WorstIdx - index of worst point WorstError - error at worst point NOTE: this function guarantees that it returns exactly zero for a section with less than 3 points. -- ALGLIB PROJECT -- Copyright 02.10.2014 by Bochkanov Sergey *************************************************************************/ private static void rdpanalyzesection(double[] x, double[] y, int i0, int i1, ref int worstidx, ref double worsterror, alglib.xparams _params) { int i = 0; double xleft = 0; double xright = 0; double vx = 0; double ve = 0; double a = 0; double b = 0; worstidx = 0; worsterror = 0; xleft = x[i0]; xright = x[i1]; if( i1-i0+1<3 || (double)(xright)==(double)(xleft) ) { worstidx = i0; worsterror = 0.0; return; } a = (y[i1]-y[i0])/(xright-xleft); b = (y[i0]*xright-y[i1]*xleft)/(xright-xleft); worstidx = -1; worsterror = 0; for(i=i0+1; i<=i1-1; i++) { vx = x[i]; ve = Math.Abs(a*vx+b-y[i]); if( ((double)(vx)>(double)(xleft) && (double)(vx)<(double)(xright)) && (double)(ve)>(double)(worsterror) ) { worsterror = ve; worstidx = i; } } } /************************************************************************* Recursive splitting of interval [I0,I1] (right boundary included) with RDP algorithm (non-parametric version which sees curve as Y(x)). Input parameters: X, Y - SORTED arrays. 
I0,I1 - interval (boundaries included) to process Eps - desired precision XOut,YOut - preallocated output arrays large enough to store result; XOut[0..1], YOut[0..1] contain first and last points of curve NOut - must contain 2 on input OUTPUT PARAMETERS: XOut, YOut - curve generated by RDP algorithm, UNSORTED NOut - number of points in curve -- ALGLIB PROJECT -- Copyright 02.10.2014 by Bochkanov Sergey *************************************************************************/ private static void rdprecursive(double[] x, double[] y, int i0, int i1, double eps, double[] xout, double[] yout, ref int nout, alglib.xparams _params) { int worstidx = 0; double worsterror = 0; alglib.ap.assert((double)(eps)>(double)(0), "RDPRecursive: internal error, Eps<0"); rdpanalyzesection(x, y, i0, i1, ref worstidx, ref worsterror, _params); if( (double)(worsterror)<=(double)(eps) ) { return; } xout[nout] = x[worstidx]; yout[nout] = y[worstidx]; nout = nout+1; if( worstidx-i0=(double)(0), "LogisticFitInternal: integrity error"); // // Handle zero X // if( (double)(x[i])==(double)(0) ) { if( (double)(tb)>=(double)(0) ) { // // Positive or zero TB, limit X^TB subject to X->+0 is equal to zero. // state.fi[i] = ta-y[i]; if( state.needfij ) { state.j[i,0] = 1; state.j[i,1] = 0; state.j[i,2] = 0; state.j[i,3] = 0; state.j[i,4] = 0; } } else { // // Negative TB, limit X^TB subject to X->+0 is equal to +INF. // state.fi[i] = td-y[i]; if( state.needfij ) { state.j[i,0] = 0; state.j[i,1] = 0; state.j[i,2] = 0; state.j[i,3] = 1; state.j[i,4] = 0; } } continue; } // // Positive X. // Prepare VP0/VP1, it may become infinite or nearly overflow in some rare cases, // handle these cases // vp0 = Math.Pow(x[i]/tc, tb); if( is4pl ) { vp1 = 1+vp0; } else { vp1 = Math.Pow(1+vp0, tg); } if( (!math.isfinite(vp1) || (double)(vp0)>(double)(1.0E50)) || (double)(vp1)>(double)(1.0E50) ) { // // VP0/VP1 are not finite, assume that it is +INF or -INF // state.fi[i] = td-y[i]; if( state.needfij ) { state.j[i,0] = 0; state.j[i,1] = 0; state.j[i,2] = 0; state.j[i,3] = 1; state.j[i,4] = 0; } continue; } // // VP0/VP1 are finite, normal processing // if( is4pl ) { state.fi[i] = td+(ta-td)/vp1-y[i]; if( state.needfij ) { state.j[i,0] = 1/vp1; state.j[i,1] = -((ta-td)*vp0*Math.Log(x[i]/tc)/math.sqr(vp1)); state.j[i,2] = (ta-td)*(tb/tc)*vp0/math.sqr(vp1); state.j[i,3] = 1-1/vp1; state.j[i,4] = 0; } } else { state.fi[i] = td+(ta-td)/vp1-y[i]; if( state.needfij ) { state.j[i,0] = 1/vp1; state.j[i,1] = (ta-td)*-tg*Math.Pow(1+vp0, -tg-1)*vp0*Math.Log(x[i]/tc); state.j[i,2] = (ta-td)*-tg*Math.Pow(1+vp0, -tg-1)*vp0*-(tb/tc); state.j[i,3] = 1-1/vp1; state.j[i,4] = -((ta-td)/vp1*Math.Log(1+vp0)); } } } // // Add regularizer // for(i=0; i<=4; i++) { state.fi[n+i] = lambdav*state.x[i]; if( state.needfij ) { for(j=0; j<=4; j++) { state.j[n+i,j] = 0.0; } state.j[n+i,i] = lambdav; } } // // Done // continue; } alglib.ap.assert(false, "LogisticFitX: internal error"); } minlm.minlmresultsbuf(state, ref p1, replm, _params); alglib.ap.assert(replm.terminationtype>0, "LogisticFitX: internal error"); } /************************************************************************* Calculate errors for 4PL/5PL fit. Leaves other fields of Rep unchanged, so caller should properly initialize it with ClearRep() call. 
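
For reference, the residual at the I-th point is computed against the 5PL model (4PL is the
special case G=1):

    f(x) = D + (A-D)/(1+(x/C)^B)^G        for x>0
    f(0) = A if B>=0,  f(0) = D if B<0    (the limit of the expression above as x approaches +0)

and the reported coefficient of determination is R2 = 1-RSS/TSS, where RSS is the residual sum
of squares and TSS is the total sum of squares around mean(Y).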
-- ALGLIB PROJECT -- Copyright 28.04.2017 by Bochkanov Sergey *************************************************************************/ private static void logisticfit45errors(double[] x, double[] y, int n, double a, double b, double c, double d, double g, lsfitreport rep, alglib.xparams _params) { int i = 0; int k = 0; double v = 0; double rss = 0; double tss = 0; double meany = 0; // // Calculate errors // rep.rmserror = 0; rep.avgerror = 0; rep.avgrelerror = 0; rep.maxerror = 0; k = 0; rss = 0.0; tss = 0.0; meany = 0.0; for(i=0; i<=n-1; i++) { meany = meany+y[i]; } meany = meany/n; for(i=0; i<=n-1; i++) { // // Calculate residual from regression // if( (double)(x[i])>(double)(0) ) { v = d+(a-d)/Math.Pow(1.0+Math.Pow(x[i]/c, b), g)-y[i]; } else { if( (double)(b)>=(double)(0) ) { v = a-y[i]; } else { v = d-y[i]; } } // // Update RSS (residual sum of squares) and TSS (total sum of squares) // which are used to calculate coefficient of determination. // // NOTE: we use formula R2 = 1-RSS/TSS because it has nice property of // being equal to 0.0 if and only if model perfectly fits data. // // When we fit nonlinear models, there are exist multiple ways of // determining R2, each of them giving different results. Formula // above is the most intuitive one. // rss = rss+v*v; tss = tss+math.sqr(y[i]-meany); // // Update errors // rep.rmserror = rep.rmserror+math.sqr(v); rep.avgerror = rep.avgerror+Math.Abs(v); if( (double)(y[i])!=(double)(0) ) { rep.avgrelerror = rep.avgrelerror+Math.Abs(v/y[i]); k = k+1; } rep.maxerror = Math.Max(rep.maxerror, Math.Abs(v)); } rep.rmserror = Math.Sqrt(rep.rmserror/n); rep.avgerror = rep.avgerror/n; if( k>0 ) { rep.avgrelerror = rep.avgrelerror/k; } rep.r2 = 1.0-rss/tss; } /************************************************************************* Internal spline fitting subroutine -- ALGLIB PROJECT -- Copyright 08.09.2009 by Bochkanov Sergey *************************************************************************/ private static void spline1dfitinternal(int st, double[] x, double[] y, double[] w, int n, double[] xc, double[] yc, int[] dc, int k, int m, ref int info, spline1d.spline1dinterpolant s, spline1d.spline1dfitreport rep, alglib.xparams _params) { double[,] fmatrix = new double[0,0]; double[,] cmatrix = new double[0,0]; double[] y2 = new double[0]; double[] w2 = new double[0]; double[] sx = new double[0]; double[] sy = new double[0]; double[] sd = new double[0]; double[] tmp = new double[0]; double[] xoriginal = new double[0]; double[] yoriginal = new double[0]; lsfitreport lrep = new lsfitreport(); double v0 = 0; double v1 = 0; double v2 = 0; double mx = 0; spline1d.spline1dinterpolant s2 = new spline1d.spline1dinterpolant(); int i = 0; int j = 0; int relcnt = 0; double xa = 0; double xb = 0; double sa = 0; double sb = 0; double bl = 0; double br = 0; double decay = 0; int i_ = 0; x = (double[])x.Clone(); y = (double[])y.Clone(); w = (double[])w.Clone(); xc = (double[])xc.Clone(); yc = (double[])yc.Clone(); info = 0; alglib.ap.assert(st==0 || st==1, "Spline1DFit: internal error!"); if( st==0 && m<4 ) { info = -1; return; } if( st==1 && m<4 ) { info = -1; return; } if( (n<1 || k<0) || k>=m ) { info = -1; return; } for(i=0; i<=k-1; i++) { info = 0; if( dc[i]<0 ) { info = -1; } if( dc[i]>1 ) { info = -1; } if( info<0 ) { return; } } if( st==1 && m%2!=0 ) { // // Hermite fitter must have even number of basis functions // info = -2; return; } // // weight decay for correct handling of task which becomes // degenerate after constraints are applied // decay = 
10000*math.machineepsilon; // // Scale X, Y, XC, YC // intfitserv.lsfitscalexy(ref x, ref y, ref w, n, ref xc, ref yc, dc, k, ref xa, ref xb, ref sa, ref sb, ref xoriginal, ref yoriginal, _params); // // allocate space, initialize: // * SX - grid for basis functions // * SY - values of basis functions at grid points // * FMatrix- values of basis functions at X[] // * CMatrix- values (derivatives) of basis functions at XC[] // y2 = new double[n+m]; w2 = new double[n+m]; fmatrix = new double[n+m, m]; if( k>0 ) { cmatrix = new double[k, m+1]; } if( st==0 ) { // // allocate space for cubic spline // sx = new double[m-2]; sy = new double[m-2]; for(j=0; j<=m-2-1; j++) { sx[j] = (double)(2*j)/(double)(m-2-1)-1; } } if( st==1 ) { // // allocate space for Hermite spline // sx = new double[m/2]; sy = new double[m/2]; sd = new double[m/2]; for(j=0; j<=m/2-1; j++) { sx[j] = (double)(2*j)/(double)(m/2-1)-1; } } // // Prepare design and constraints matrices: // * fill constraints matrix // * fill first N rows of design matrix with values // * fill next M rows of design matrix with regularizing term // * append M zeros to Y // * append M elements, mean(abs(W)) each, to W // for(j=0; j<=m-1; j++) { // // prepare Jth basis function // if( st==0 ) { // // cubic spline basis // for(i=0; i<=m-2-1; i++) { sy[i] = 0; } bl = 0; br = 0; if( j=0 && dc[i]<=2, "Spline1DFit: internal error!"); spline1d.spline1ddiff(s2, xc[i], ref v0, ref v1, ref v2, _params); if( dc[i]==0 ) { cmatrix[i,j] = v0; } if( dc[i]==1 ) { cmatrix[i,j] = v1; } if( dc[i]==2 ) { cmatrix[i,j] = v2; } } } for(i=0; i<=k-1; i++) { cmatrix[i,m] = yc[i]; } for(i=0; i<=m-1; i++) { for(j=0; j<=m-1; j++) { if( i==j ) { fmatrix[n+i,j] = decay; } else { fmatrix[n+i,j] = 0; } } } y2 = new double[n+m]; w2 = new double[n+m]; for(i_=0; i_<=n-1;i_++) { y2[i_] = y[i_]; } for(i_=0; i_<=n-1;i_++) { w2[i_] = w[i_]; } mx = 0; for(i=0; i<=n-1; i++) { mx = mx+Math.Abs(w[i]); } mx = mx/n; for(i=0; i<=m-1; i++) { y2[n+i] = 0; w2[n+i] = mx; } // // Solve constrained task // if( k>0 ) { // // solve using regularization // lsfitlinearwc(y2, w2, fmatrix, cmatrix, n+m, m, k, ref info, ref tmp, lrep, _params); } else { // // no constraints, no regularization needed // lsfitlinearwc(y, w, fmatrix, cmatrix, n, m, k, ref info, ref tmp, lrep, _params); } if( info<0 ) { return; } // // Generate spline and scale it // if( st==0 ) { // // cubic spline basis // for(i_=0; i_<=m-2-1;i_++) { sy[i_] = tmp[i_]; } spline1d.spline1dbuildcubic(sx, sy, m-2, 1, tmp[m-2], 1, tmp[m-1], s, _params); } if( st==1 ) { // // Hermite basis // for(i=0; i<=m/2-1; i++) { sy[i] = tmp[2*i]; sd[i] = tmp[2*i+1]; } spline1d.spline1dbuildhermite(sx, sy, sd, m/2, s, _params); } spline1d.spline1dlintransx(s, 2/(xb-xa), -((xa+xb)/(xb-xa)), _params); spline1d.spline1dlintransy(s, sb-sa, sa, _params); // // Scale absolute errors obtained from LSFitLinearW. 
// Relative error should be calculated separately // (because of shifting/scaling of the task) // rep.taskrcond = lrep.taskrcond; rep.rmserror = lrep.rmserror*(sb-sa); rep.avgerror = lrep.avgerror*(sb-sa); rep.maxerror = lrep.maxerror*(sb-sa); rep.avgrelerror = 0; relcnt = 0; for(i=0; i<=n-1; i++) { if( (double)(yoriginal[i])!=(double)(0) ) { rep.avgrelerror = rep.avgrelerror+Math.Abs(spline1d.spline1dcalc(s, xoriginal[i], _params)-yoriginal[i])/Math.Abs(yoriginal[i]); relcnt = relcnt+1; } } if( relcnt!=0 ) { rep.avgrelerror = rep.avgrelerror/relcnt; } } /************************************************************************* Internal fitting subroutine *************************************************************************/ private static void lsfitlinearinternal(double[] y, double[] w, double[,] fmatrix, int n, int m, ref int info, ref double[] c, lsfitreport rep, alglib.xparams _params) { double threshold = 0; double[,] ft = new double[0,0]; double[,] q = new double[0,0]; double[,] l = new double[0,0]; double[,] r = new double[0,0]; double[] b = new double[0]; double[] wmod = new double[0]; double[] tau = new double[0]; double[] nzeros = new double[0]; double[] s = new double[0]; int i = 0; int j = 0; double v = 0; double[] sv = new double[0]; double[,] u = new double[0,0]; double[,] vt = new double[0,0]; double[] tmp = new double[0]; double[] utb = new double[0]; double[] sutb = new double[0]; int relcnt = 0; int i_ = 0; info = 0; c = new double[0]; clearreport(rep, _params); if( n<1 || m<1 ) { info = -1; return; } info = 1; threshold = Math.Sqrt(math.machineepsilon); // // Degenerate case, needs special handling // if( n=M. Generate design matrix and reduce to N=M using // QR decomposition. // ft = new double[n, m]; b = new double[n]; for(j=0; j<=n-1; j++) { v = w[j]; for(i_=0; i_<=m-1;i_++) { ft[j,i_] = v*fmatrix[j,i_]; } b[j] = w[j]*y[j]; } ortfac.rmatrixqr(ref ft, n, m, ref tau, _params); ortfac.rmatrixqrunpackq(ft, n, m, tau, m, ref q, _params); ortfac.rmatrixqrunpackr(ft, n, m, ref r, _params); tmp = new double[m]; for(i=0; i<=m-1; i++) { tmp[i] = 0; } for(i=0; i<=n-1; i++) { v = b[i]; for(i_=0; i_<=m-1;i_++) { tmp[i_] = tmp[i_] + v*q[i,i_]; } } b = new double[m]; for(i_=0; i_<=m-1;i_++) { b[i_] = tmp[i_]; } // // R contains reduced MxM design upper triangular matrix, // B contains reduced Mx1 right part. // // Determine system condition number and decide // should we use triangular solver (faster) or // SVD-based solver (more stable). // // We can use LU-based RCond estimator for this task. 
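        // Note: Threshold was initialized above as sqrt(machineepsilon), so the slower
        // SVD-based branch below is entered only for badly conditioned (nearly rank-deficient)
        // design matrices; well-conditioned systems are handled by the fast triangular
        // back-substitution.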
// rep.taskrcond = rcond.rmatrixlurcondinf(r, m, _params); if( (double)(rep.taskrcond)>(double)(threshold) ) { // // use QR-based solver // c = new double[m]; c[m-1] = b[m-1]/r[m-1,m-1]; for(i=m-2; i>=0; i--) { v = 0.0; for(i_=i+1; i_<=m-1;i_++) { v += r[i,i_]*c[i_]; } c[i] = (b[i]-v)/r[i,i]; } } else { // // use SVD-based solver // if( !svd.rmatrixsvd(r, m, m, 1, 1, 2, ref sv, ref u, ref vt, _params) ) { info = -4; return; } utb = new double[m]; sutb = new double[m]; for(i=0; i<=m-1; i++) { utb[i] = 0; } for(i=0; i<=m-1; i++) { v = b[i]; for(i_=0; i_<=m-1;i_++) { utb[i_] = utb[i_] + v*u[i,i_]; } } if( (double)(sv[0])>(double)(0) ) { rep.taskrcond = sv[m-1]/sv[0]; for(i=0; i<=m-1; i++) { if( (double)(sv[i])>(double)(threshold*sv[0]) ) { sutb[i] = utb[i]/sv[i]; } else { sutb[i] = 0; } } } else { rep.taskrcond = 0; for(i=0; i<=m-1; i++) { sutb[i] = 0; } } c = new double[m]; for(i=0; i<=m-1; i++) { c[i] = 0; } for(i=0; i<=m-1; i++) { v = sutb[i]; for(i_=0; i_<=m-1;i_++) { c[i_] = c[i_] + v*vt[i,i_]; } } } // // calculate errors // rep.rmserror = 0; rep.avgerror = 0; rep.avgrelerror = 0; rep.maxerror = 0; relcnt = 0; for(i=0; i<=n-1; i++) { v = 0.0; for(i_=0; i_<=m-1;i_++) { v += fmatrix[i,i_]*c[i_]; } rep.rmserror = rep.rmserror+math.sqr(v-y[i]); rep.avgerror = rep.avgerror+Math.Abs(v-y[i]); if( (double)(y[i])!=(double)(0) ) { rep.avgrelerror = rep.avgrelerror+Math.Abs(v-y[i])/Math.Abs(y[i]); relcnt = relcnt+1; } rep.maxerror = Math.Max(rep.maxerror, Math.Abs(v-y[i])); } rep.rmserror = Math.Sqrt(rep.rmserror/n); rep.avgerror = rep.avgerror/n; if( relcnt!=0 ) { rep.avgrelerror = rep.avgrelerror/relcnt; } nzeros = new double[n]; s = new double[m]; for(i=0; i<=m-1; i++) { s[i] = 0; } for(i=0; i<=n-1; i++) { for(j=0; j<=m-1; j++) { s[j] = s[j]+math.sqr(fmatrix[i,j]); } nzeros[i] = 0; } for(i=0; i<=m-1; i++) { if( (double)(s[i])!=(double)(0) ) { s[i] = Math.Sqrt(1/s[i]); } else { s[i] = 1; } } estimateerrors(fmatrix, nzeros, y, w, c, s, n, m, rep, ref r, 1, _params); } /************************************************************************* Internal subroutine *************************************************************************/ private static void lsfitclearrequestfields(lsfitstate state, alglib.xparams _params) { state.needf = false; state.needfg = false; state.needfgh = false; state.xupdated = false; } /************************************************************************* Internal subroutine, calculates barycentric basis functions. Used for efficient simultaneous calculation of N basis functions. -- ALGLIB -- Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ private static void barycentriccalcbasis(ratint.barycentricinterpolant b, double t, ref double[] y, alglib.xparams _params) { double s2 = 0; double s = 0; double v = 0; int i = 0; int j = 0; int i_ = 0; // // special case: N=1 // if( b.n==1 ) { y[0] = 1; return; } // // Here we assume that task is normalized, i.e.: // 1. abs(Y[i])<=1 // 2. abs(W[i])<=1 // 3. X[] is ordered // // First, we decide: should we use "safe" formula (guarded // against overflow) or fast one? 
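        // The code below evaluates the I-th basis function as
        //     y[i] = (w[i]*s/(t-x[i])) / sum_j(w[j]*s/(t-x[j])),
        // where s is the distance from t to the nearest interpolation node; multiplying
        // numerator and denominator by s leaves the ratio unchanged but keeps intermediate
        // values bounded. An exact hit t==x[i] is handled separately by returning the unit
        // vector e_i.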
// s = Math.Abs(t-b.x[0]); for(i=0; i<=b.n-1; i++) { v = b.x[i]; if( (double)(v)==(double)(t) ) { for(j=0; j<=b.n-1; j++) { y[j] = 0; } y[i] = 1; return; } v = Math.Abs(t-v); if( (double)(v)<(double)(s) ) { s = v; } } s2 = 0; for(i=0; i<=b.n-1; i++) { v = s/(t-b.x[i]); v = v*b.w[i]; y[i] = v; s2 = s2+v; } v = 1/s2; for(i_=0; i_<=b.n-1;i_++) { y[i_] = v*y[i_]; } } /************************************************************************* This is internal function for Chebyshev fitting. It assumes that input data are normalized: * X/XC belong to [-1,+1], * mean(Y)=0, stddev(Y)=1. It does not checks inputs for errors. This function is used to fit general (shifted) Chebyshev models, power basis models or barycentric models. INPUT PARAMETERS: X - points, array[0..N-1]. Y - function values, array[0..N-1]. W - weights, array[0..N-1] N - number of points, N>0. XC - points where polynomial values/derivatives are constrained, array[0..K-1]. YC - values of constraints, array[0..K-1] DC - array[0..K-1], types of constraints: * DC[i]=0 means that P(XC[i])=YC[i] * DC[i]=1 means that P'(XC[i])=YC[i] K - number of constraints, 0<=K=1 OUTPUT PARAMETERS: Info- same format as in LSFitLinearW() subroutine: * Info>0 task is solved * Info<=0 an error occured: -4 means inconvergence of internal SVD -3 means inconsistent constraints C - interpolant in Chebyshev form; [-1,+1] is used as base interval Rep - report, same format as in LSFitLinearW() subroutine. Following fields are set: * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED IMPORTANT: this subroitine doesn't calculate task's condition number for K<>0. -- ALGLIB PROJECT -- Copyright 10.12.2009 by Bochkanov Sergey *************************************************************************/ private static void internalchebyshevfit(double[] x, double[] y, double[] w, int n, double[] xc, double[] yc, int[] dc, int k, int m, ref int info, ref double[] c, lsfitreport rep, alglib.xparams _params) { double[] y2 = new double[0]; double[] w2 = new double[0]; double[] tmp = new double[0]; double[] tmp2 = new double[0]; double[] tmpdiff = new double[0]; double[] bx = new double[0]; double[] by = new double[0]; double[] bw = new double[0]; double[,] fmatrix = new double[0,0]; double[,] cmatrix = new double[0,0]; int i = 0; int j = 0; double mx = 0; double decay = 0; int i_ = 0; xc = (double[])xc.Clone(); yc = (double[])yc.Clone(); info = 0; c = new double[0]; clearreport(rep, _params); // // weight decay for correct handling of task which becomes // degenerate after constraints are applied // decay = 10000*math.machineepsilon; // // allocate space, initialize/fill: // * FMatrix- values of basis functions at X[] // * CMatrix- values (derivatives) of basis functions at XC[] // * fill constraints matrix // * fill first N rows of design matrix with values // * fill next M rows of design matrix with regularizing term // * append M zeros to Y // * append M elements, mean(abs(W)) each, to W // y2 = new double[n+m]; w2 = new double[n+m]; tmp = new double[m]; tmpdiff = new double[m]; fmatrix = new double[n+m, m]; if( k>0 ) { cmatrix = new double[k, m+1]; } // // Fill design matrix, Y2, W2: // * first N rows with basis functions for original points // * next M rows with decay terms // for(i=0; i<=n-1; i++) { // // prepare Ith row // use Tmp for calculations to avoid multidimensional arrays overhead // for(j=0; j<=m-1; j++) { if( j==0 ) { tmp[j] 
= 1; } else { if( j==1 ) { tmp[j] = x[i]; } else { tmp[j] = 2*x[i]*tmp[j-1]-tmp[j-2]; } } } for(i_=0; i_<=m-1;i_++) { fmatrix[i,i_] = tmp[i_]; } } for(i=0; i<=m-1; i++) { for(j=0; j<=m-1; j++) { if( i==j ) { fmatrix[n+i,j] = decay; } else { fmatrix[n+i,j] = 0; } } } for(i_=0; i_<=n-1;i_++) { y2[i_] = y[i_]; } for(i_=0; i_<=n-1;i_++) { w2[i_] = w[i_]; } mx = 0; for(i=0; i<=n-1; i++) { mx = mx+Math.Abs(w[i]); } mx = mx/n; for(i=0; i<=m-1; i++) { y2[n+i] = 0; w2[n+i] = mx; } // // fill constraints matrix // for(i=0; i<=k-1; i++) { // // prepare Ith row // use Tmp for basis function values, // TmpDiff for basos function derivatives // for(j=0; j<=m-1; j++) { if( j==0 ) { tmp[j] = 1; tmpdiff[j] = 0; } else { if( j==1 ) { tmp[j] = xc[i]; tmpdiff[j] = 1; } else { tmp[j] = 2*xc[i]*tmp[j-1]-tmp[j-2]; tmpdiff[j] = 2*(tmp[j-1]+xc[i]*tmpdiff[j-1])-tmpdiff[j-2]; } } } if( dc[i]==0 ) { for(i_=0; i_<=m-1;i_++) { cmatrix[i,i_] = tmp[i_]; } } if( dc[i]==1 ) { for(i_=0; i_<=m-1;i_++) { cmatrix[i,i_] = tmpdiff[i_]; } } cmatrix[i,m] = yc[i]; } // // Solve constrained task // if( k>0 ) { // // solve using regularization // lsfitlinearwc(y2, w2, fmatrix, cmatrix, n+m, m, k, ref info, ref c, rep, _params); } else { // // no constraints, no regularization needed // lsfitlinearwc(y, w, fmatrix, cmatrix, n, m, 0, ref info, ref c, rep, _params); } if( info<0 ) { return; } } /************************************************************************* Internal Floater-Hormann fitting subroutine for fixed D *************************************************************************/ private static void barycentricfitwcfixedd(double[] x, double[] y, double[] w, int n, double[] xc, double[] yc, int[] dc, int k, int m, int d, ref int info, ratint.barycentricinterpolant b, barycentricfitreport rep, alglib.xparams _params) { double[,] fmatrix = new double[0,0]; double[,] cmatrix = new double[0,0]; double[] y2 = new double[0]; double[] w2 = new double[0]; double[] sx = new double[0]; double[] sy = new double[0]; double[] sbf = new double[0]; double[] xoriginal = new double[0]; double[] yoriginal = new double[0]; double[] tmp = new double[0]; lsfitreport lrep = new lsfitreport(); double v0 = 0; double v1 = 0; double mx = 0; ratint.barycentricinterpolant b2 = new ratint.barycentricinterpolant(); int i = 0; int j = 0; int relcnt = 0; double xa = 0; double xb = 0; double sa = 0; double sb = 0; double decay = 0; int i_ = 0; x = (double[])x.Clone(); y = (double[])y.Clone(); w = (double[])w.Clone(); xc = (double[])xc.Clone(); yc = (double[])yc.Clone(); info = 0; if( ((n<1 || m<2) || k<0) || k>=m ) { info = -1; return; } for(i=0; i<=k-1; i++) { info = 0; if( dc[i]<0 ) { info = -1; } if( dc[i]>1 ) { info = -1; } if( info<0 ) { return; } } // // weight decay for correct handling of task which becomes // degenerate after constraints are applied // decay = 10000*math.machineepsilon; // // Scale X, Y, XC, YC // intfitserv.lsfitscalexy(ref x, ref y, ref w, n, ref xc, ref yc, dc, k, ref xa, ref xb, ref sa, ref sb, ref xoriginal, ref yoriginal, _params); // // allocate space, initialize: // * FMatrix- values of basis functions at X[] // * CMatrix- values (derivatives) of basis functions at XC[] // y2 = new double[n+m]; w2 = new double[n+m]; fmatrix = new double[n+m, m]; if( k>0 ) { cmatrix = new double[k, m+1]; } y2 = new double[n+m]; w2 = new double[n+m]; // // Prepare design and constraints matrices: // * fill constraints matrix // * fill first N rows of design matrix with values // * fill next M rows of design matrix with regularizing term 
// * append M zeros to Y // * append M elements, mean(abs(W)) each, to W // sx = new double[m]; sy = new double[m]; sbf = new double[m]; for(j=0; j<=m-1; j++) { sx[j] = (double)(2*j)/(double)(m-1)-1; } for(i=0; i<=m-1; i++) { sy[i] = 1; } ratint.barycentricbuildfloaterhormann(sx, sy, m, d, b2, _params); mx = 0; for(i=0; i<=n-1; i++) { barycentriccalcbasis(b2, x[i], ref sbf, _params); for(i_=0; i_<=m-1;i_++) { fmatrix[i,i_] = sbf[i_]; } y2[i] = y[i]; w2[i] = w[i]; mx = mx+Math.Abs(w[i])/n; } for(i=0; i<=m-1; i++) { for(j=0; j<=m-1; j++) { if( i==j ) { fmatrix[n+i,j] = decay; } else { fmatrix[n+i,j] = 0; } } y2[n+i] = 0; w2[n+i] = mx; } if( k>0 ) { for(j=0; j<=m-1; j++) { for(i=0; i<=m-1; i++) { sy[i] = 0; } sy[j] = 1; ratint.barycentricbuildfloaterhormann(sx, sy, m, d, b2, _params); for(i=0; i<=k-1; i++) { alglib.ap.assert(dc[i]>=0 && dc[i]<=1, "BarycentricFit: internal error!"); ratint.barycentricdiff1(b2, xc[i], ref v0, ref v1, _params); if( dc[i]==0 ) { cmatrix[i,j] = v0; } if( dc[i]==1 ) { cmatrix[i,j] = v1; } } } for(i=0; i<=k-1; i++) { cmatrix[i,m] = yc[i]; } } // // Solve constrained task // if( k>0 ) { // // solve using regularization // lsfitlinearwc(y2, w2, fmatrix, cmatrix, n+m, m, k, ref info, ref tmp, lrep, _params); } else { // // no constraints, no regularization needed // lsfitlinearwc(y, w, fmatrix, cmatrix, n, m, k, ref info, ref tmp, lrep, _params); } if( info<0 ) { return; } // // Generate interpolant and scale it // for(i_=0; i_<=m-1;i_++) { sy[i_] = tmp[i_]; } ratint.barycentricbuildfloaterhormann(sx, sy, m, d, b, _params); ratint.barycentriclintransx(b, 2/(xb-xa), -((xa+xb)/(xb-xa)), _params); ratint.barycentriclintransy(b, sb-sa, sa, _params); // // Scale absolute errors obtained from LSFitLinearW. // Relative error should be calculated separately // (because of shifting/scaling of the task) // rep.taskrcond = lrep.taskrcond; rep.rmserror = lrep.rmserror*(sb-sa); rep.avgerror = lrep.avgerror*(sb-sa); rep.maxerror = lrep.maxerror*(sb-sa); rep.avgrelerror = 0; relcnt = 0; for(i=0; i<=n-1; i++) { if( (double)(yoriginal[i])!=(double)(0) ) { rep.avgrelerror = rep.avgrelerror+Math.Abs(ratint.barycentriccalc(b, xoriginal[i], _params)-yoriginal[i])/Math.Abs(yoriginal[i]); relcnt = relcnt+1; } } if( relcnt!=0 ) { rep.avgrelerror = rep.avgrelerror/relcnt; } } private static void clearreport(lsfitreport rep, alglib.xparams _params) { rep.taskrcond = 0; rep.iterationscount = 0; rep.varidx = -1; rep.rmserror = 0; rep.avgerror = 0; rep.avgrelerror = 0; rep.maxerror = 0; rep.wrmserror = 0; rep.r2 = 0; rep.covpar = new double[0, 0]; rep.errpar = new double[0]; rep.errcurve = new double[0]; rep.noise = new double[0]; } /************************************************************************* This internal function estimates covariance matrix and other error-related information for linear/nonlinear least squares model. It has a bit awkward interface, but it can be used for both linear and nonlinear problems. 
INPUT PARAMETERS: F1 - array[0..N-1,0..K-1]: * for linear problems - matrix of function values * for nonlinear problems - Jacobian matrix F0 - array[0..N-1]: * for linear problems - must be filled with zeros * for nonlinear problems - must store values of function being fitted Y - array[0..N-1]: * for linear and nonlinear problems - must store target values W - weights, array[0..N-1]: * for linear and nonlinear problems - weights X - array[0..K-1]: * for linear and nonlinear problems - current solution S - array[0..K-1]: * its components should be strictly positive * squared inverse of this diagonal matrix is used as damping factor for covariance matrix (linear and nonlinear problems) * for nonlinear problems, when scale of the variables is usually explicitly given by user, you may use scale vector for this parameter * for linear problems you may set this parameter to S=sqrt(1/diag(F'*F)) * this parameter is automatically rescaled by this function, only relative magnitudes of its components (with respect to each other) matter. N - number of points, N>0. K - number of dimensions Rep - structure which is used to store results Z - additional matrix which, depending on ZKind, may contain some information used to accelerate calculations - or just can be temporary buffer: * for ZKind=0 Z contains no information, just temporary buffer which can be resized and used as needed * for ZKind=1 Z contains triangular matrix from QR decomposition of W*F1. This matrix can be used to speedup calculation of covariance matrix. It should not be changed by algorithm. ZKind- contents of Z OUTPUT PARAMETERS: * Rep.CovPar covariance matrix for parameters, array[K,K]. * Rep.ErrPar errors in parameters, array[K], errpar = sqrt(diag(CovPar)) * Rep.ErrCurve vector of fit errors - standard deviations of empirical best-fit curve from "ideal" best-fit curve built with infinite number of samples, array[N]. errcurve = sqrt(diag(J*CovPar*J')), where J is Jacobian matrix. * Rep.Noise vector of per-point estimates of noise, array[N] * Rep.R2 coefficient of determination (non-weighted) Other fields of Rep are not changed. IMPORTANT: errors in parameters are calculated without taking into account boundary/linear constraints! Presence of constraints changes distribution of errors, but there is no easy way to account for constraints when you calculate covariance matrix. NOTE: noise in the data is estimated as follows: * for fitting without user-supplied weights all points are assumed to have same level of noise, which is estimated from the data * for fitting with user-supplied weights we assume that noise level in I-th point is inversely proportional to Ith weight. Coefficient of proportionality is estimated from the data. NOTE: we apply small amount of regularization when we invert squared Jacobian and calculate covariance matrix. It guarantees that algorithm won't divide by zero during inversion, but skews error estimates a bit (fractional error is about 10^-9). However, we believe that this difference is insignificant for all practical purposes except for the situation when you want to compare ALGLIB results with "reference" implementation up to the last significant digit. 
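NOTE: purely for reference (this summary is not part of the interface and
      simply restates what the implementation below computes; NZCnt is the
      number of points with non-zero weight), the noise model and R2
      mentioned above are

          NoiseC   = sqrt( sum_i ((F(X_i)-Y_i)*W_i)^2 / (NZCnt-K) )
          Noise_i  = NoiseC/W_i          (zero for points with W_i=0)
          R2       = max(1-RSS/TSS, 0)   with non-weighted RSS/TSS computed
                                         over points with non-zero weight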
-- ALGLIB PROJECT -- Copyright 10.12.2009 by Bochkanov Sergey *************************************************************************/ private static void estimateerrors(double[,] f1, double[] f0, double[] y, double[] w, double[] x, double[] s, int n, int k, lsfitreport rep, ref double[,] z, int zkind, alglib.xparams _params) { int i = 0; int j = 0; int j1 = 0; double v = 0; double noisec = 0; int info = 0; matinv.matinvreport invrep = new matinv.matinvreport(); int nzcnt = 0; double avg = 0; double rss = 0; double tss = 0; double sz = 0; double ss = 0; int i_ = 0; s = (double[])s.Clone(); // // Compute NZCnt - count of non-zero weights // nzcnt = 0; for(i=0; i<=n-1; i++) { if( (double)(w[i])!=(double)(0) ) { nzcnt = nzcnt+1; } } // // Compute R2 // if( nzcnt>0 ) { avg = 0.0; for(i=0; i<=n-1; i++) { if( (double)(w[i])!=(double)(0) ) { avg = avg+y[i]; } } avg = avg/nzcnt; rss = 0.0; tss = 0.0; for(i=0; i<=n-1; i++) { if( (double)(w[i])!=(double)(0) ) { v = 0.0; for(i_=0; i_<=k-1;i_++) { v += f1[i,i_]*x[i_]; } v = v+f0[i]; rss = rss+math.sqr(v-y[i]); tss = tss+math.sqr(y[i]-avg); } } if( (double)(tss)!=(double)(0) ) { rep.r2 = Math.Max(1.0-rss/tss, 0.0); } else { rep.r2 = 1.0; } } else { rep.r2 = 0; } // // Compute estimate of proportionality between noise in the data and weights: // NoiseC = mean(per-point-noise*per-point-weight) // Noise level (standard deviation) at each point is equal to NoiseC/W[I]. // if( nzcnt>k ) { noisec = 0.0; for(i=0; i<=n-1; i++) { if( (double)(w[i])!=(double)(0) ) { v = 0.0; for(i_=0; i_<=k-1;i_++) { v += f1[i,i_]*x[i_]; } v = v+f0[i]; noisec = noisec+math.sqr((v-y[i])*w[i]); } } noisec = Math.Sqrt(noisec/(nzcnt-k)); } else { noisec = 0.0; } // // Two branches on noise level: // * NoiseC>0 normal situation // * NoiseC=0 degenerate case CovPar is filled by zeros // apserv.rmatrixsetlengthatleast(ref rep.covpar, k, k, _params); if( (double)(noisec)>(double)(0) ) { // // Normal situation: non-zero noise level // alglib.ap.assert(zkind==0 || zkind==1, "LSFit: internal error in EstimateErrors() function"); if( zkind==0 ) { // // Z contains no additional information which can be used to speed up // calculations. We have to calculate covariance matrix on our own: // * Compute scaled Jacobian N*J, where N[i,i]=WCur[I]/NoiseC, store in Z // * Compute Z'*Z, store in CovPar // * Apply moderate regularization to CovPar and compute matrix inverse. // In case inverse failed, increase regularization parameter and try // again. // apserv.rmatrixsetlengthatleast(ref z, n, k, _params); for(i=0; i<=n-1; i++) { v = w[i]/noisec; for(i_=0; i_<=k-1;i_++) { z[i,i_] = v*f1[i,i_]; } } // // Convert S to automatically scaled damped matrix: // * calculate SZ - sum of diagonal elements of Z'*Z // * calculate SS - sum of diagonal elements of S^(-2) // * overwrite S by (SZ/SS)*S^(-2) // * now S has approximately same magnitude as giagonal of Z'*Z // sz = 0; for(i=0; i<=n-1; i++) { for(j=0; j<=k-1; j++) { sz = sz+z[i,j]*z[i,j]; } } if( (double)(sz)==(double)(0) ) { sz = 1; } ss = 0; for(j=0; j<=k-1; j++) { ss = ss+1/math.sqr(s[j]); } for(j=0; j<=k-1; j++) { s[j] = sz/ss/math.sqr(s[j]); } // // Calculate damped inverse inv(Z'*Z+S). // We increase damping factor V until Z'*Z become well-conditioned. 
// v = 1.0E3*math.machineepsilon; do { ablas.rmatrixsyrk(k, n, 1.0, z, 0, 0, 2, 0.0, rep.covpar, 0, 0, true, _params); for(i=0; i<=k-1; i++) { rep.covpar[i,i] = rep.covpar[i,i]+v*s[i]; } matinv.spdmatrixinverse(ref rep.covpar, k, true, ref info, invrep, _params); v = 10*v; } while( info<=0 ); for(i=0; i<=k-1; i++) { for(j=i+1; j<=k-1; j++) { rep.covpar[j,i] = rep.covpar[i,j]; } } } if( zkind==1 ) { // // We can reuse additional information: // * Z contains R matrix from QR decomposition of W*F1 // * After multiplication by 1/NoiseC we get Z_mod = N*F1, where diag(N)=w[i]/NoiseC // * Such triangular Z_mod is a Cholesky factor from decomposition of J'*N'*N*J. // Thus, we can calculate covariance matrix as inverse of the matrix given by // its Cholesky decomposition. It allow us to avoid time-consuming calculation // of J'*N'*N*J in CovPar - complexity is reduced from O(N*K^2) to O(K^3), which // is quite good because K is usually orders of magnitude smaller than N. // // First, convert S to automatically scaled damped matrix: // * calculate SZ - sum of magnitudes of diagonal elements of Z/NoiseC // * calculate SS - sum of diagonal elements of S^(-1) // * overwrite S by (SZ/SS)*S^(-1) // * now S has approximately same magnitude as giagonal of Z'*Z // sz = 0; for(j=0; j<=k-1; j++) { sz = sz+Math.Abs(z[j,j]/noisec); } if( (double)(sz)==(double)(0) ) { sz = 1; } ss = 0; for(j=0; j<=k-1; j++) { ss = ss+1/s[j]; } for(j=0; j<=k-1; j++) { s[j] = sz/ss/s[j]; } // // Calculate damped inverse of inv((Z+v*S)'*(Z+v*S)) // We increase damping factor V until matrix become well-conditioned. // v = 1.0E3*math.machineepsilon; do { for(i=0; i<=k-1; i++) { for(j=i; j<=k-1; j++) { rep.covpar[i,j] = z[i,j]/noisec; } rep.covpar[i,i] = rep.covpar[i,i]+v*s[i]; } matinv.spdmatrixcholeskyinverse(ref rep.covpar, k, true, ref info, invrep, _params); v = 10*v; } while( info<=0 ); for(i=0; i<=k-1; i++) { for(j=i+1; j<=k-1; j++) { rep.covpar[j,i] = rep.covpar[i,j]; } } } } else { // // Degenerate situation: zero noise level, covariance matrix is zero. // for(i=0; i<=k-1; i++) { for(j=0; j<=k-1; j++) { rep.covpar[j,i] = 0; } } } // // Estimate erorrs in parameters, curve and per-point noise // apserv.rvectorsetlengthatleast(ref rep.errpar, k, _params); apserv.rvectorsetlengthatleast(ref rep.errcurve, n, _params); apserv.rvectorsetlengthatleast(ref rep.noise, n, _params); for(i=0; i<=k-1; i++) { rep.errpar[i] = Math.Sqrt(rep.covpar[i,i]); } for(i=0; i<=n-1; i++) { // // ErrCurve[I] is sqrt(P[i,i]) where P=J*CovPar*J' // v = 0.0; for(j=0; j<=k-1; j++) { for(j1=0; j1<=k-1; j1++) { v = v+f1[i,j]*rep.covpar[j,j1]*f1[i,j1]; } } rep.errcurve[i] = Math.Sqrt(v); // // Noise[i] is filled using weights and current estimate of noise level // if( (double)(w[i])!=(double)(0) ) { rep.noise[i] = noisec/w[i]; } else { rep.noise[i] = 0; } } } } public class rbfv2 { /************************************************************************* Buffer object which is used to perform nearest neighbor requests in the multithreaded mode (multiple threads working with same KD-tree object). This object should be created with KDTreeCreateBuffer(). 
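(Within this unit the same buffer class is used for thread-safe evaluation
of RBF models; such a buffer is initialized with rbfv2createcalcbuffer(),
see below.)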
*************************************************************************/ public class rbfv2calcbuffer : apobject { public double[] x; public double[] curboxmin; public double[] curboxmax; public double curdist2; public double[] x123; public double[] y123; public rbfv2calcbuffer() { init(); } public override void init() { x = new double[0]; curboxmin = new double[0]; curboxmax = new double[0]; x123 = new double[0]; y123 = new double[0]; } public override alglib.apobject make_copy() { rbfv2calcbuffer _result = new rbfv2calcbuffer(); _result.x = (double[])x.Clone(); _result.curboxmin = (double[])curboxmin.Clone(); _result.curboxmax = (double[])curboxmax.Clone(); _result.curdist2 = curdist2; _result.x123 = (double[])x123.Clone(); _result.y123 = (double[])y123.Clone(); return _result; } }; /************************************************************************* RBF model. Never try to work with fields of this object directly - always use ALGLIB functions to use this object. *************************************************************************/ public class rbfv2model : apobject { public int ny; public int nx; public int bf; public int nh; public double[] ri; public double[] s; public int[] kdroots; public int[] kdnodes; public double[] kdsplits; public double[] kdboxmin; public double[] kdboxmax; public double[] cw; public double[,] v; public double lambdareg; public int maxits; public double supportr; public int basisfunction; public rbfv2calcbuffer calcbuf; public rbfv2model() { init(); } public override void init() { ri = new double[0]; s = new double[0]; kdroots = new int[0]; kdnodes = new int[0]; kdsplits = new double[0]; kdboxmin = new double[0]; kdboxmax = new double[0]; cw = new double[0]; v = new double[0,0]; calcbuf = new rbfv2calcbuffer(); } public override alglib.apobject make_copy() { rbfv2model _result = new rbfv2model(); _result.ny = ny; _result.nx = nx; _result.bf = bf; _result.nh = nh; _result.ri = (double[])ri.Clone(); _result.s = (double[])s.Clone(); _result.kdroots = (int[])kdroots.Clone(); _result.kdnodes = (int[])kdnodes.Clone(); _result.kdsplits = (double[])kdsplits.Clone(); _result.kdboxmin = (double[])kdboxmin.Clone(); _result.kdboxmax = (double[])kdboxmax.Clone(); _result.cw = (double[])cw.Clone(); _result.v = (double[,])v.Clone(); _result.lambdareg = lambdareg; _result.maxits = maxits; _result.supportr = supportr; _result.basisfunction = basisfunction; _result.calcbuf = (rbfv2calcbuffer)calcbuf.make_copy(); return _result; } }; /************************************************************************* Internal buffer for GridCalc3 *************************************************************************/ public class rbfv2gridcalcbuffer : apobject { public rbfv2calcbuffer calcbuf; public double[] cx; public double[] rx; public double[] ry; public double[] tx; public double[] ty; public bool[] rf; public rbfv2gridcalcbuffer() { init(); } public override void init() { calcbuf = new rbfv2calcbuffer(); cx = new double[0]; rx = new double[0]; ry = new double[0]; tx = new double[0]; ty = new double[0]; rf = new bool[0]; } public override alglib.apobject make_copy() { rbfv2gridcalcbuffer _result = new rbfv2gridcalcbuffer(); _result.calcbuf = (rbfv2calcbuffer)calcbuf.make_copy(); _result.cx = (double[])cx.Clone(); _result.rx = (double[])rx.Clone(); _result.ry = (double[])ry.Clone(); _result.tx = (double[])tx.Clone(); _result.ty = (double[])ty.Clone(); _result.rf = (bool[])rf.Clone(); return _result; } }; 
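/*************************************************************************
NOTE (illustrative remark, not part of the original interface description):
the V field of rbfv2model stores the linear term of the model as an
array[NY,NX+1] matrix - the first NX columns hold linear coefficients and
the last column holds the constant term. Assuming a point X[0..NX-1], the
linear part of output I is therefore evaluated as in the sketch below
(this matches the linear part computed by rbfv2calc2()/rbfv2calc3()):

    y[i] = s.v[i,nx];
    for(j=0; j<=nx-1; j++)
    {
        y[i] = y[i]+s.v[i,j]*x[j];
    }
*************************************************************************/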
/************************************************************************* RBF solution report: * TerminationType - termination type, positive values - success, non-positive - failure. *************************************************************************/ public class rbfv2report : apobject { public int terminationtype; public double maxerror; public double rmserror; public rbfv2report() { init(); } public override void init() { } public override alglib.apobject make_copy() { rbfv2report _result = new rbfv2report(); _result.terminationtype = terminationtype; _result.maxerror = maxerror; _result.rmserror = rmserror; return _result; } }; public const double defaultlambdareg = 1.0E-6; public const double defaultsupportr = 0.10; public const int defaultmaxits = 400; public const int defaultbf = 1; public const int maxnodesize = 6; public const double complexitymultiplier = 100.0; /************************************************************************* This function creates RBF model for a scalar (NY=1) or vector (NY>1) function in a NX-dimensional space (NX=2 or NX=3). INPUT PARAMETERS: NX - dimension of the space, NX=2 or NX=3 NY - function dimension, NY>=1 OUTPUT PARAMETERS: S - RBF model (initially equals to zero) -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfv2create(int nx, int ny, rbfv2model s, alglib.xparams _params) { int i = 0; int j = 0; alglib.ap.assert(nx>=1, "RBFCreate: NX<1"); alglib.ap.assert(ny>=1, "RBFCreate: NY<1"); // // Serializable parameters // s.nx = nx; s.ny = ny; s.bf = 0; s.nh = 0; s.v = new double[ny, nx+1]; for(i=0; i<=ny-1; i++) { for(j=0; j<=nx; j++) { s.v[i,j] = 0; } } // // Non-serializable parameters // s.lambdareg = defaultlambdareg; s.maxits = defaultmaxits; s.supportr = defaultsupportr; s.basisfunction = defaultbf; } /************************************************************************* This function creates buffer structure which can be used to perform parallel RBF model evaluations (with one RBF model instance being used from multiple threads, as long as different threads use different instances of buffer). This buffer object can be used with rbftscalcbuf() function (here "ts" stands for "thread-safe", "buf" is a suffix which denotes function which reuses previously allocated output space). How to use it: * create RBF model structure with rbfcreate() * load data, tune parameters * call rbfbuildmodel() * call rbfcreatecalcbuffer(), once per thread working with RBF model (you should call this function only AFTER call to rbfbuildmodel(), see below for more information) * call rbftscalcbuf() from different threads, with each thread working with its own copy of buffer object. INPUT PARAMETERS S - RBF model OUTPUT PARAMETERS Buf - external buffer. IMPORTANT: buffer object should be used only with RBF model object which was used to initialize buffer. Any attempt to use buffer with different object is dangerous - you may get memory violation error because sizes of internal arrays do not fit to dimensions of RBF structure. IMPORTANT: you should call this function only for model which was built with rbfbuildmodel() function, after successful invocation of rbfbuildmodel(). Sizes of some internal structures are determined only after model is built, so buffer object created before model construction stage will be useless (and any attempt to use it will result in exception). 
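EXAMPLE (an illustrative sketch only, not a normative part of this unit; it
uses the higher-level rbf* wrapper functions referenced above, and the
rbfsetpoints()/rbfsetalgohierarchical() calls are assumptions about that
interface rather than something defined here):

    double[,] xy = new double[,]{{0,0,1},{1,0,2},{0,1,3}};  // array[N,NX+NY]
    alglib.rbfmodel model;
    alglib.rbfreport rep;
    alglib.rbfcreate(2, 1, out model);                  // NX=2, NY=1
    alglib.rbfsetpoints(model, xy);                     // load data
    alglib.rbfsetalgohierarchical(model, 1.0, 3, 0.0);  // tune parameters
    alglib.rbfbuildmodel(model, out rep);               // build model FIRST

    alglib.rbfcalcbuffer buf;
    alglib.rbfcreatecalcbuffer(model, out buf);         // THEN create buffer
                                                        // (one per thread)

    // inside a worker thread, each thread using its own Buf:
    double[] y = new double[0];
    alglib.rbftscalcbuf(model, buf, new double[]{0.5,0.5}, ref y);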
-- ALGLIB -- Copyright 02.04.2016 by Sergey Bochkanov *************************************************************************/ public static void rbfv2createcalcbuffer(rbfv2model s, rbfv2calcbuffer buf, alglib.xparams _params) { allocatecalcbuffer(s, buf, _params); } /************************************************************************* This function builds hierarchical RBF model. INPUT PARAMETERS: X - array[N,S.NX], X-values Y - array[N,S.NY], Y-values ScaleVec- array[S.NX], vector of per-dimension scales N - points count ATerm - linear term type, 1 for linear, 2 for constant, 3 for zero. NH - hierarchy height RBase - base RBF radius BF - basis function type: 0 for Gaussian, 1 for compact LambdaNS- non-smoothness penalty coefficient. Exactly zero value means that no penalty is applied, and even system matrix does not contain penalty-related rows. Value of 1 means S - RBF model, initialized by RBFCreate() call. progress10000- variable used for progress reports, it is regularly set to the current progress multiplied by 10000, in order to get value in [0,10000] range. The rationale for such scaling is that it allows us to use integer type to store progress, which has less potential for non-atomic corruption on unprotected reads from another threads. You can read this variable from some other thread to get estimate of the current progress. Initial value of this variable is ignored, it is written by this function, but not read. terminationrequest - variable used for termination requests; its initial value must be False, and you can set it to True from some other thread. This routine regularly checks this variable and will terminate model construction shortly upon discovering that termination was requested. OUTPUT PARAMETERS: S - updated model (for rep.terminationtype>0, unchanged otherwise) Rep - report: * Rep.TerminationType: * -5 - non-distinct basis function centers were detected, interpolation aborted * -4 - nonconvergence of the internal SVD solver * 1 - successful termination * 8 terminated by user via rbfrequesttermination() Fields are used for debugging purposes: * Rep.IterationsCount - iterations count of the LSQR solver * Rep.NMV - number of matrix-vector products * Rep.ARows - rows count for the system matrix * Rep.ACols - columns count for the system matrix * Rep.ANNZ - number of significantly non-zero elements (elements above some algorithm-determined threshold) NOTE: failure to build model will leave current state of the structure unchanged. 
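NOTE: as an illustrative sketch (not library code; it assumes that the
      caller keeps the variables passed here by ref as shared state), the
      progress/termination facilities described above can be used from
      another thread as follows:

          // monitoring thread:
          double fraction = progress10000/10000.0;   // 0.0 ... 1.0

          // cancelling thread:
          terminationrequest = true;   // build terminates with code 8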
-- ALGLIB -- Copyright 20.06.2016 by Bochkanov Sergey *************************************************************************/ public static void rbfv2buildhierarchical(double[,] x, double[,] y, int n, double[] scalevec, int aterm, int nh, double rbase, double lambdans, rbfv2model s, ref int progress10000, ref bool terminationrequest, rbfv2report rep, alglib.xparams _params) { int nx = 0; int ny = 0; int bf = 0; double[,] rhs = new double[0,0]; double[,] residualy = new double[0,0]; double[,] v = new double[0,0]; int rowsperpoint = 0; int[] hidx = new int[0]; double[] xr = new double[0]; double[] ri = new double[0]; int[] kdroots = new int[0]; int[] kdnodes = new int[0]; double[] kdsplits = new double[0]; double[] kdboxmin = new double[0]; double[] kdboxmax = new double[0]; double[] cw = new double[0]; int[] cwrange = new int[0]; double[,] curxy = new double[0,0]; int curn = 0; int nbasis = 0; nearestneighbor.kdtree curtree = new nearestneighbor.kdtree(); nearestneighbor.kdtree globaltree = new nearestneighbor.kdtree(); double[] x0 = new double[0]; double[] x1 = new double[0]; int[] tags = new int[0]; double[] dist = new double[0]; int[] nncnt = new int[0]; int[] rowsizes = new int[0]; double[] diagata = new double[0]; double[] prec = new double[0]; double[] tmpx = new double[0]; int i = 0; int j = 0; int k = 0; int k2 = 0; int levelidx = 0; int offsi = 0; int offsj = 0; double val = 0; double criticalr = 0; int cnt = 0; double avgdiagata = 0; double[] avgrowsize = new double[0]; double sumrowsize = 0; double rprogress = 0; int maxits = 0; linlsqr.linlsqrstate linstate = new linlsqr.linlsqrstate(); linlsqr.linlsqrreport lsqrrep = new linlsqr.linlsqrreport(); sparse.sparsematrix sparseacrs = new sparse.sparsematrix(); double[] densew1 = new double[0]; double[] denseb1 = new double[0]; rbfv2calcbuffer calcbuf = new rbfv2calcbuffer(); double[] vr2 = new double[0]; int[] voffs = new int[0]; int[] rowindexes = new int[0]; double[] rowvals = new double[0]; double penalty = 0; alglib.ap.assert(s.nx>0, "RBFV2BuildHierarchical: incorrect NX"); alglib.ap.assert(s.ny>0, "RBFV2BuildHierarchical: incorrect NY"); alglib.ap.assert((double)(lambdans)>=(double)(0), "RBFV2BuildHierarchical: incorrect LambdaNS"); for(j=0; j<=s.nx-1; j++) { alglib.ap.assert((double)(scalevec[j])>(double)(0), "RBFV2BuildHierarchical: incorrect ScaleVec"); } nx = s.nx; ny = s.ny; bf = s.basisfunction; alglib.ap.assert(bf==0 || bf==1, "RBFV2BuildHierarchical: incorrect BF"); // // Clean up communication and report fields // progress10000 = 0; rep.maxerror = 0; rep.rmserror = 0; // // Quick exit when we have no points // if( n==0 ) { zerofill(s, nx, ny, bf, _params); rep.terminationtype = 1; progress10000 = 10000; return; } // // First model in a sequence - linear model. // Residuals from linear regression are stored in the ResidualY variable // (used later to build RBF models). // residualy = new double[n, ny]; for(i=0; i<=n-1; i++) { for(j=0; j<=ny-1; j++) { residualy[i,j] = y[i,j]; } } if( !rbfv2buildlinearmodel(x, ref residualy, n, nx, ny, aterm, ref v, _params) ) { zerofill(s, nx, ny, bf, _params); rep.terminationtype = -5; progress10000 = 10000; return; } // // Handle special case: multilayer model with NLayers=0. // Quick exit. 
// if( nh==0 ) { rep.terminationtype = 1; zerofill(s, nx, ny, bf, _params); for(i=0; i<=ny-1; i++) { for(j=0; j<=nx; j++) { s.v[i,j] = v[i,j]; } } rep.maxerror = 0; rep.rmserror = 0; for(i=0; i<=n-1; i++) { for(j=0; j<=ny-1; j++) { rep.maxerror = Math.Max(rep.maxerror, Math.Abs(residualy[i,j])); rep.rmserror = rep.rmserror+math.sqr(residualy[i,j]); } } rep.rmserror = Math.Sqrt(rep.rmserror/(n*ny)); progress10000 = 10000; return; } // // Penalty coefficient is set to LambdaNS*RBase^2. // // We use such normalization because VALUES of radial basis // functions have roughly unit magnitude, but their DERIVATIVES // are (roughly) inversely proportional to the radius. Thus, // without additional scaling, regularization coefficient // looses invariancy w.r.t. scaling of variables. // if( (double)(lambdans)==(double)(0) ) { rowsperpoint = 1; } else { // // NOTE: simplified penalty function is used, which does not provide rotation invariance // rowsperpoint = 1+nx; } penalty = lambdans*math.sqr(rbase); // // Prepare temporary structures // rhs = new double[n*rowsperpoint, ny]; curxy = new double[n, nx+ny]; x0 = new double[nx]; x1 = new double[nx]; tags = new int[n]; dist = new double[n]; vr2 = new double[n]; voffs = new int[n]; nncnt = new int[n]; rowsizes = new int[n*rowsperpoint]; denseb1 = new double[n*rowsperpoint]; for(i=0; i<=n*rowsperpoint-1; i++) { for(j=0; j<=ny-1; j++) { rhs[i,j] = 0; } } for(i=0; i<=n-1; i++) { for(j=0; j<=nx-1; j++) { curxy[i,j] = x[i,j]/scalevec[j]; } for(j=0; j<=ny-1; j++) { rhs[i*rowsperpoint,j] = residualy[i,j]; } tags[i] = i; } nearestneighbor.kdtreebuildtagged(curxy, tags, n, nx, 0, 2, globaltree, _params); // // Generate sequence of layer radii. // Prepare assignment of different levels to points. // alglib.ap.assert(n>0, "RBFV2BuildHierarchical: integrity check failed"); ri = new double[nh]; for(levelidx=0; levelidx<=nh-1; levelidx++) { ri[levelidx] = rbase*Math.Pow(2, -levelidx); } hidx = new int[n]; xr = new double[n]; for(i=0; i<=n-1; i++) { hidx[i] = nh; xr[i] = math.maxrealnumber; alglib.ap.assert((double)(xr[i])>(double)(ri[0]), "RBFV2BuildHierarchical: integrity check failed"); } for(levelidx=0; levelidx<=nh-1; levelidx++) { // // Scan dataset points, for each such point that distance to nearest // "support" point is larger than SupportR*Ri[LevelIdx] we: // * set distance of current point to 0 (it is support now) and update HIdx // * perform R-NN request with radius SupportR*Ri[LevelIdx] // * for each point in request update its distance // criticalr = s.supportr*ri[levelidx]; for(i=0; i<=n-1; i++) { if( (double)(xr[i])>(double)(criticalr) ) { // // Mark point as support // alglib.ap.assert(hidx[i]==nh, "RBFV2BuildHierarchical: integrity check failed"); hidx[i] = levelidx; xr[i] = 0; // // Update neighbors // for(j=0; j<=nx-1; j++) { x0[j] = x[i,j]/scalevec[j]; } k = nearestneighbor.kdtreequeryrnn(globaltree, x0, criticalr, true, _params); nearestneighbor.kdtreequeryresultstags(globaltree, ref tags, _params); nearestneighbor.kdtreequeryresultsdistances(globaltree, ref dist, _params); for(j=0; j<=k-1; j++) { xr[tags[j]] = Math.Min(xr[tags[j]], dist[j]); } } } } // // Build multitree (with zero weights) according to hierarchy. // // NOTE: this code assumes that during every iteration kdNodes, // kdSplits and CW have size which EXACTLY fits their // contents, and that these variables are resized at each // iteration when we add new hierarchical model. 
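            //
            // Illustrative note: e.g. for NH=3 and RBase=1 the radii generated
            // above are Ri=[1.0, 0.5, 0.25]; after the loop below KDRoots[L]
            // holds the starting offset of the L-th tree inside KDNodes (with
            // KDRoots[NH]=len(KDNodes)), and CWRange[L] holds the starting
            // offset of the L-th layer's center/weight block inside CW.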
// kdroots = new int[nh+1]; kdnodes = new int[0]; kdsplits = new double[0]; kdboxmin = new double[nx]; kdboxmax = new double[nx]; cw = new double[0]; cwrange = new int[nh+1]; nearestneighbor.kdtreeexplorebox(globaltree, ref kdboxmin, ref kdboxmax, _params); cwrange[0] = 0; for(levelidx=0; levelidx<=nh-1; levelidx++) { // // Prepare radius and root offset // kdroots[levelidx] = alglib.ap.len(kdnodes); // // Generate LevelIdx-th tree and append to multi-tree // curn = 0; for(i=0; i<=n-1; i++) { if( hidx[i]<=levelidx ) { for(j=0; j<=nx-1; j++) { curxy[curn,j] = x[i,j]/scalevec[j]; } for(j=0; j<=ny-1; j++) { curxy[curn,nx+j] = 0; } apserv.inc(ref curn, _params); } } alglib.ap.assert(curn>0, "RBFV2BuildHierarchical: integrity check failed"); nearestneighbor.kdtreebuild(curxy, curn, nx, ny, 2, curtree, _params); convertandappendtree(curtree, curn, nx, ny, ref kdnodes, ref kdsplits, ref cw, _params); // // Fill entry of CWRange (we assume that length of CW exactly fits its actual size) // cwrange[levelidx+1] = alglib.ap.len(cw); } kdroots[nh] = alglib.ap.len(kdnodes); // // Prepare buffer and scaled dataset // allocatecalcbuffer(s, calcbuf, _params); for(i=0; i<=n-1; i++) { for(j=0; j<=nx-1; j++) { curxy[i,j] = x[i,j]/scalevec[j]; } } // // Calculate average row sizes for each layer; these values are used // for smooth progress reporting (it adds some overhead, but in most // cases - insignificant one). // apserv.rvectorsetlengthatleast(ref avgrowsize, nh, _params); sumrowsize = 0; for(levelidx=0; levelidx<=nh-1; levelidx++) { cnt = 0; for(i=0; i<=n-1; i++) { for(j=0; j<=nx-1; j++) { x0[j] = curxy[i,j]; } cnt = cnt+designmatrixrowsize(kdnodes, kdsplits, cw, ri, kdroots, kdboxmin, kdboxmax, nx, ny, nh, levelidx, rbfv2nearradius(bf, _params), x0, calcbuf, _params); } avgrowsize[levelidx] = apserv.coalesce(cnt, 1, _params)/apserv.coalesce(n, 1, _params); sumrowsize = sumrowsize+avgrowsize[levelidx]; } // // Build unconstrained model with LSQR solver, applied layer by layer // for(levelidx=0; levelidx<=nh-1; levelidx++) { // // Generate A - matrix of basis functions (near radius is used) // // NOTE: AvgDiagATA is average value of diagonal element of A^T*A. // It is used to calculate value of Tikhonov regularization // coefficient. 
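            //
            // Illustrative note: for every output dimension the loop below uses
            // LSQR to solve a Tikhonov-damped least squares problem - the
            // damping coefficient sqrt(LambdaReg*AvgDiagATA) is passed to
            // linlsqrsetlambdai(), and Prec[j]=1/sqrt(diag(A'*A)[j]) is applied
            // as a diagonal (Jacobi) preconditioner.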
// nbasis = (cwrange[levelidx+1]-cwrange[levelidx])/(nx+ny); alglib.ap.assert(cwrange[levelidx+1]-cwrange[levelidx]==nbasis*(nx+ny)); for(i=0; i<=n-1; i++) { for(j=0; j<=nx-1; j++) { x0[j] = curxy[i,j]; } cnt = designmatrixrowsize(kdnodes, kdsplits, cw, ri, kdroots, kdboxmin, kdboxmax, nx, ny, nh, levelidx, rbfv2nearradius(bf, _params), x0, calcbuf, _params); nncnt[i] = cnt; for(j=0; j<=rowsperpoint-1; j++) { rowsizes[i*rowsperpoint+j] = cnt; } } apserv.ivectorsetlengthatleast(ref rowindexes, nbasis, _params); apserv.rvectorsetlengthatleast(ref rowvals, nbasis*rowsperpoint, _params); apserv.rvectorsetlengthatleast(ref diagata, nbasis, _params); sparse.sparsecreatecrsbuf(n*rowsperpoint, nbasis, rowsizes, sparseacrs, _params); avgdiagata = 0.0; for(j=0; j<=nbasis-1; j++) { diagata[j] = 0; } for(i=0; i<=n-1; i++) { // // Fill design matrix row, diagonal of A^T*A // for(j=0; j<=nx-1; j++) { x0[j] = curxy[i,j]; } designmatrixgeneraterow(kdnodes, kdsplits, cw, ri, kdroots, kdboxmin, kdboxmax, cwrange, nx, ny, nh, levelidx, bf, rbfv2nearradius(bf, _params), rowsperpoint, penalty, x0, calcbuf, vr2, voffs, rowindexes, rowvals, ref cnt, _params); alglib.ap.assert(cnt==nncnt[i], "RBFV2BuildHierarchical: integrity check failed"); for(k=0; k<=rowsperpoint-1; k++) { for(j=0; j<=cnt-1; j++) { val = rowvals[j*rowsperpoint+k]; sparse.sparseset(sparseacrs, i*rowsperpoint+k, rowindexes[j], val, _params); avgdiagata = avgdiagata+math.sqr(val); diagata[rowindexes[j]] = diagata[rowindexes[j]]+math.sqr(val); } } // // Handle possible termination requests // if( terminationrequest ) { // // Request for termination was submitted, terminate immediately // zerofill(s, nx, ny, bf, _params); rep.terminationtype = 8; progress10000 = 10000; return; } } avgdiagata = avgdiagata/nbasis; apserv.rvectorsetlengthatleast(ref prec, nbasis, _params); for(j=0; j<=nbasis-1; j++) { prec[j] = 1/apserv.coalesce(Math.Sqrt(diagata[j]), 1, _params); } // // solve // maxits = apserv.coalescei(s.maxits, defaultmaxits, _params); apserv.rvectorsetlengthatleast(ref tmpx, nbasis, _params); linlsqr.linlsqrcreate(n*rowsperpoint, nbasis, linstate, _params); linlsqr.linlsqrsetcond(linstate, 0.0, 0.0, maxits, _params); linlsqr.linlsqrsetlambdai(linstate, Math.Sqrt(s.lambdareg*avgdiagata), _params); for(j=0; j<=ny-1; j++) { for(i=0; i<=n*rowsperpoint-1; i++) { denseb1[i] = rhs[i,j]; } linlsqr.linlsqrsetb(linstate, denseb1, _params); linlsqr.linlsqrrestart(linstate, _params); linlsqr.linlsqrsetxrep(linstate, true, _params); while( linlsqr.linlsqriteration(linstate, _params) ) { if( terminationrequest ) { // // Request for termination was submitted, terminate immediately // zerofill(s, nx, ny, bf, _params); rep.terminationtype = 8; progress10000 = 10000; return; } if( linstate.needmv ) { for(i=0; i<=nbasis-1; i++) { tmpx[i] = prec[i]*linstate.x[i]; } sparse.sparsemv(sparseacrs, tmpx, ref linstate.mv, _params); continue; } if( linstate.needmtv ) { sparse.sparsemtv(sparseacrs, linstate.x, ref linstate.mtv, _params); for(i=0; i<=nbasis-1; i++) { linstate.mtv[i] = prec[i]*linstate.mtv[i]; } continue; } if( linstate.xupdated ) { rprogress = 0; for(i=0; i<=levelidx-1; i++) { rprogress = rprogress+maxits*ny*avgrowsize[i]; } rprogress = rprogress+(linlsqr.linlsqrpeekiterationscount(linstate, _params)+j*maxits)*avgrowsize[levelidx]; rprogress = rprogress/(sumrowsize*maxits*ny); rprogress = 10000*rprogress; rprogress = Math.Max(rprogress, 0); rprogress = Math.Min(rprogress, 10000); alglib.ap.assert(progress10000<=(int)Math.Round(rprogress)+1, "HRBF: integrity 
check failed (progress indicator) even after +1 safeguard correction"); progress10000 = (int)Math.Round(rprogress); continue; } alglib.ap.assert(false, "HRBF: unexpected request from LSQR solver"); } linlsqr.linlsqrresults(linstate, ref densew1, lsqrrep, _params); alglib.ap.assert(lsqrrep.terminationtype>0, "RBFV2BuildHierarchical: integrity check failed"); for(i=0; i<=nbasis-1; i++) { densew1[i] = prec[i]*densew1[i]; } for(i=0; i<=nbasis-1; i++) { offsi = cwrange[levelidx]+(nx+ny)*i; cw[offsi+nx+j] = densew1[i]; } } // // Update residuals (far radius is used) // for(i=0; i<=n-1; i++) { for(j=0; j<=nx-1; j++) { x0[j] = curxy[i,j]; } designmatrixgeneraterow(kdnodes, kdsplits, cw, ri, kdroots, kdboxmin, kdboxmax, cwrange, nx, ny, nh, levelidx, bf, rbfv2farradius(bf, _params), rowsperpoint, penalty, x0, calcbuf, vr2, voffs, rowindexes, rowvals, ref cnt, _params); for(j=0; j<=cnt-1; j++) { offsj = cwrange[levelidx]+(nx+ny)*rowindexes[j]+nx; for(k=0; k<=rowsperpoint-1; k++) { val = rowvals[j*rowsperpoint+k]; for(k2=0; k2<=ny-1; k2++) { rhs[i*rowsperpoint+k,k2] = rhs[i*rowsperpoint+k,k2]-val*cw[offsj+k2]; } } } } } // // Model is built. // // Copy local variables by swapping, global ones (ScaleVec) are copied // explicitly. // s.bf = bf; s.nh = nh; alglib.ap.swap(ref s.ri, ref ri); alglib.ap.swap(ref s.kdroots, ref kdroots); alglib.ap.swap(ref s.kdnodes, ref kdnodes); alglib.ap.swap(ref s.kdsplits, ref kdsplits); alglib.ap.swap(ref s.kdboxmin, ref kdboxmin); alglib.ap.swap(ref s.kdboxmax, ref kdboxmax); alglib.ap.swap(ref s.cw, ref cw); alglib.ap.swap(ref s.v, ref v); s.s = new double[nx]; for(i=0; i<=nx-1; i++) { s.s[i] = scalevec[i]; } rep.terminationtype = 1; // // Calculate maximum and RMS errors // rep.maxerror = 0; rep.rmserror = 0; for(i=0; i<=n-1; i++) { for(j=0; j<=ny-1; j++) { rep.maxerror = Math.Max(rep.maxerror, Math.Abs(rhs[i*rowsperpoint,j])); rep.rmserror = rep.rmserror+math.sqr(rhs[i*rowsperpoint,j]); } } rep.rmserror = Math.Sqrt(rep.rmserror/(n*ny)); // // Update progress reports // progress10000 = 10000; } /************************************************************************* Serializer: allocation -- ALGLIB -- Copyright 02.02.2012 by Bochkanov Sergey *************************************************************************/ public static void rbfv2alloc(alglib.serializer s, rbfv2model model, alglib.xparams _params) { // // Data // s.alloc_entry(); s.alloc_entry(); s.alloc_entry(); s.alloc_entry(); apserv.allocrealarray(s, model.ri, -1, _params); apserv.allocrealarray(s, model.s, -1, _params); apserv.allocintegerarray(s, model.kdroots, -1, _params); apserv.allocintegerarray(s, model.kdnodes, -1, _params); apserv.allocrealarray(s, model.kdsplits, -1, _params); apserv.allocrealarray(s, model.kdboxmin, -1, _params); apserv.allocrealarray(s, model.kdboxmax, -1, _params); apserv.allocrealarray(s, model.cw, -1, _params); apserv.allocrealmatrix(s, model.v, -1, -1, _params); } /************************************************************************* Serializer: serialization -- ALGLIB -- Copyright 02.02.2012 by Bochkanov Sergey *************************************************************************/ public static void rbfv2serialize(alglib.serializer s, rbfv2model model, alglib.xparams _params) { // // Data // s.serialize_int(model.nx); s.serialize_int(model.ny); s.serialize_int(model.nh); s.serialize_int(model.bf); apserv.serializerealarray(s, model.ri, -1, _params); apserv.serializerealarray(s, model.s, -1, _params); apserv.serializeintegerarray(s, model.kdroots, -1, _params); 
apserv.serializeintegerarray(s, model.kdnodes, -1, _params); apserv.serializerealarray(s, model.kdsplits, -1, _params); apserv.serializerealarray(s, model.kdboxmin, -1, _params); apserv.serializerealarray(s, model.kdboxmax, -1, _params); apserv.serializerealarray(s, model.cw, -1, _params); apserv.serializerealmatrix(s, model.v, -1, -1, _params); } /************************************************************************* Serializer: unserialization -- ALGLIB -- Copyright 02.02.2012 by Bochkanov Sergey *************************************************************************/ public static void rbfv2unserialize(alglib.serializer s, rbfv2model model, alglib.xparams _params) { int nx = 0; int ny = 0; // // Unserialize primary model parameters, initialize model. // // It is necessary to call RBFCreate() because some internal fields // which are NOT unserialized will need initialization. // nx = s.unserialize_int(); ny = s.unserialize_int(); rbfv2create(nx, ny, model, _params); model.nh = s.unserialize_int(); model.bf = s.unserialize_int(); apserv.unserializerealarray(s, ref model.ri, _params); apserv.unserializerealarray(s, ref model.s, _params); apserv.unserializeintegerarray(s, ref model.kdroots, _params); apserv.unserializeintegerarray(s, ref model.kdnodes, _params); apserv.unserializerealarray(s, ref model.kdsplits, _params); apserv.unserializerealarray(s, ref model.kdboxmin, _params); apserv.unserializerealarray(s, ref model.kdboxmax, _params); apserv.unserializerealarray(s, ref model.cw, _params); apserv.unserializerealmatrix(s, ref model.v, _params); } /************************************************************************* Returns far radius for basis function type *************************************************************************/ public static double rbfv2farradius(int bf, alglib.xparams _params) { double result = 0; result = 1; if( bf==0 ) { result = 5.0; } if( bf==1 ) { result = 3; } return result; } /************************************************************************* Returns near radius for basis function type *************************************************************************/ public static double rbfv2nearradius(int bf, alglib.xparams _params) { double result = 0; result = 1; if( bf==0 ) { result = 3.0; } if( bf==1 ) { result = 3; } return result; } /************************************************************************* Returns basis function value. 
Assumes that D2>=0 *************************************************************************/ public static double rbfv2basisfunc(int bf, double d2, alglib.xparams _params) { double result = 0; double v = 0; result = 0; if( bf==0 ) { result = Math.Exp(-d2); return result; } if( bf==1 ) { // // if D2<3: // Exp(1)*Exp(-D2)*Exp(-1/(1-D2/9)) // else: // 0 // v = 1-d2/9; if( (double)(v)<=(double)(0) ) { result = 0; return result; } result = 2.718281828459045*Math.Exp(-d2)*Math.Exp(-(1/v)); return result; } alglib.ap.assert(false, "RBFV2BasisFunc: unknown BF type"); return result; } /************************************************************************* Returns basis function value, first and second derivatives Assumes that D2>=0 *************************************************************************/ public static void rbfv2basisfuncdiff2(int bf, double d2, ref double f, ref double df, ref double d2f, alglib.xparams _params) { double v = 0; f = 0; df = 0; d2f = 0; if( bf==0 ) { f = Math.Exp(-d2); df = -f; d2f = f; return; } if( bf==1 ) { // // if D2<3: // F = Exp(1)*Exp(-D2)*Exp(-1/(1-D2/9)) // dF = -F * [pow(D2/9-1,-2)/9 + 1] // d2F = -dF * [pow(D2/9-1,-2)/9 + 1] + F*(2/81)*pow(D2/9-1,-3) // else: // 0 // v = 1-d2/9; if( (double)(v)<=(double)(0) ) { f = 0; df = 0; d2f = 0; return; } f = Math.Exp(1)*Math.Exp(-d2)*Math.Exp(-(1/v)); df = -(f*(1/(9*v*v)+1)); d2f = -(df*(1/(9*v*v)+1))+f*((double)2/(double)81)/(v*v*v); return; } alglib.ap.assert(false, "RBFV2BasisFuncDiff2: unknown BF type"); } /************************************************************************* This function calculates values of the RBF model in the given point. This function should be used when we have NY=1 (scalar function) and NX=1 (1-dimensional space). This function returns 0.0 when: * model is not initialized * NX<>1 *NY<>1 INPUT PARAMETERS: S - RBF model X0 - X-coordinate, finite number RESULT: value of the model or 0.0 (as defined above) -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static double rbfv2calc1(rbfv2model s, double x0, alglib.xparams _params) { double result = 0; alglib.ap.assert(math.isfinite(x0), "RBFCalc1: invalid value for X0 (X0 is Inf)!"); if( s.ny!=1 || s.nx!=1 ) { result = 0; return result; } result = s.v[0,0]*x0-s.v[0,1]; if( s.nh==0 ) { return result; } allocatecalcbuffer(s, s.calcbuf, _params); s.calcbuf.x123[0] = x0; rbfv2tscalcbuf(s, s.calcbuf, s.calcbuf.x123, ref s.calcbuf.y123, _params); result = s.calcbuf.y123[0]; return result; } /************************************************************************* This function calculates values of the RBF model in the given point. This function should be used when we have NY=1 (scalar function) and NX=2 (2-dimensional space). If you have 3-dimensional space, use RBFCalc3(). If you have general situation (NX-dimensional space, NY-dimensional function) you should use general, less efficient implementation RBFCalc(). If you want to calculate function values many times, consider using RBFGridCalc2(), which is far more efficient than many subsequent calls to RBFCalc2(). 
This function returns 0.0 when: * model is not initialized * NX<>2 *NY<>1 INPUT PARAMETERS: S - RBF model X0 - first coordinate, finite number X1 - second coordinate, finite number RESULT: value of the model or 0.0 (as defined above) -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static double rbfv2calc2(rbfv2model s, double x0, double x1, alglib.xparams _params) { double result = 0; alglib.ap.assert(math.isfinite(x0), "RBFCalc2: invalid value for X0 (X0 is Inf)!"); alglib.ap.assert(math.isfinite(x1), "RBFCalc2: invalid value for X1 (X1 is Inf)!"); if( s.ny!=1 || s.nx!=2 ) { result = 0; return result; } result = s.v[0,0]*x0+s.v[0,1]*x1+s.v[0,2]; if( s.nh==0 ) { return result; } allocatecalcbuffer(s, s.calcbuf, _params); s.calcbuf.x123[0] = x0; s.calcbuf.x123[1] = x1; rbfv2tscalcbuf(s, s.calcbuf, s.calcbuf.x123, ref s.calcbuf.y123, _params); result = s.calcbuf.y123[0]; return result; } /************************************************************************* This function calculates values of the RBF model in the given point. This function should be used when we have NY=1 (scalar function) and NX=3 (3-dimensional space). If you have 2-dimensional space, use RBFCalc2(). If you have general situation (NX-dimensional space, NY-dimensional function) you should use general, less efficient implementation RBFCalc(). This function returns 0.0 when: * model is not initialized * NX<>3 *NY<>1 INPUT PARAMETERS: S - RBF model X0 - first coordinate, finite number X1 - second coordinate, finite number X2 - third coordinate, finite number RESULT: value of the model or 0.0 (as defined above) -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static double rbfv2calc3(rbfv2model s, double x0, double x1, double x2, alglib.xparams _params) { double result = 0; alglib.ap.assert(math.isfinite(x0), "RBFCalc3: invalid value for X0 (X0 is Inf or NaN)!"); alglib.ap.assert(math.isfinite(x1), "RBFCalc3: invalid value for X1 (X1 is Inf or NaN)!"); alglib.ap.assert(math.isfinite(x2), "RBFCalc3: invalid value for X2 (X2 is Inf or NaN)!"); if( s.ny!=1 || s.nx!=3 ) { result = 0; return result; } result = s.v[0,0]*x0+s.v[0,1]*x1+s.v[0,2]*x2+s.v[0,3]; if( s.nh==0 ) { return result; } allocatecalcbuffer(s, s.calcbuf, _params); s.calcbuf.x123[0] = x0; s.calcbuf.x123[1] = x1; s.calcbuf.x123[2] = x2; rbfv2tscalcbuf(s, s.calcbuf, s.calcbuf.x123, ref s.calcbuf.y123, _params); result = s.calcbuf.y123[0]; return result; } /************************************************************************* This function calculates values of the RBF model at the given point. Same as RBFCalc(), but does not reallocate Y when in is large enough to store function values. INPUT PARAMETERS: S - RBF model X - coordinates, array[NX]. X may have more than NX elements, in this case only leading NX will be used. Y - possibly preallocated array OUTPUT PARAMETERS: Y - function value, array[NY]. Y is not reallocated when it is larger than NY. 
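EXAMPLE (illustrative only; S is assumed to be an already built model with
NY outputs):

    double[] y = new double[ny];          // preallocated once and reused
    rbfv2calcbuf(s, x, ref y, _params);   // Y is not reallocated, len(y)>=NY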
-- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfv2calcbuf(rbfv2model s, double[] x, ref double[] y, alglib.xparams _params) { rbfv2tscalcbuf(s, s.calcbuf, x, ref y, _params); } /************************************************************************* This function calculates values of the RBF model at the given point, using external buffer object (internal temporaries of RBF model are not modified). This function allows to use same RBF model object in different threads, assuming that different threads use different instances of buffer structure. INPUT PARAMETERS: S - RBF model, may be shared between different threads Buf - buffer object created for this particular instance of RBF model with rbfcreatecalcbuffer(). X - coordinates, array[NX]. X may have more than NX elements, in this case only leading NX will be used. Y - possibly preallocated array OUTPUT PARAMETERS: Y - function value, array[NY]. Y is not reallocated when it is larger than NY. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfv2tscalcbuf(rbfv2model s, rbfv2calcbuffer buf, double[] x, ref double[] y, alglib.xparams _params) { int i = 0; int j = 0; int levelidx = 0; double rcur = 0; double rquery2 = 0; double invrc2 = 0; int nx = 0; int ny = 0; alglib.ap.assert(alglib.ap.len(x)>=s.nx, "RBFCalcBuf: Length(X)(double)(buf.curboxmax[j]) ) { buf.curdist2 = buf.curdist2+math.sqr(buf.x[j]-buf.curboxmax[j]); } } } // // Call PartialCalcRec() // rcur = s.ri[levelidx]; invrc2 = 1/(rcur*rcur); rquery2 = math.sqr(rcur*rbfv2farradius(s.bf, _params)); partialcalcrec(s, buf, s.kdroots[levelidx], invrc2, rquery2, buf.x, y, _params); } } /************************************************************************* This function calculates values of the RBF model at the regular grid. Grid have N0*N1 points, with Point[I,J] = (X0[I], X1[J]) This function returns 0.0 when: * model is not initialized * NX<>2 *NY<>1 INPUT PARAMETERS: S - RBF model X0 - array of grid nodes, first coordinates, array[N0] N0 - grid size (number of nodes) in the first dimension X1 - array of grid nodes, second coordinates, array[N1] N1 - grid size (number of nodes) in the second dimension OUTPUT PARAMETERS: Y - function values, array[N0,N1]. Y is out-variable and is reallocated by this function. NOTE: as a special exception, this function supports unordered arrays X0 and X1. However, future versions may be more efficient for X0/X1 ordered by ascending. 
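EXAMPLE (illustrative only; S is assumed to be an already built model with
NX=2, NY=1):

    double[] x0 = new double[]{0.0, 0.5, 1.0};        // N0=3
    double[] x1 = new double[]{0.0, 1.0};             // N1=2
    double[,] y = new double[0,0];
    rbfv2gridcalc2(s, x0, 3, x1, 2, ref y, _params);  // y[i,j]=f(x0[i],x1[j])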
-- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfv2gridcalc2(rbfv2model s, double[] x0, int n0, double[] x1, int n1, ref double[,] y, alglib.xparams _params) { double[] cpx0 = new double[0]; double[] cpx1 = new double[0]; double[] dummyx2 = new double[0]; double[] dummyx3 = new double[0]; bool[] dummyflag = new bool[0]; int[] p01 = new int[0]; int[] p11 = new int[0]; int[] p2 = new int[0]; double[] vy = new double[0]; int i = 0; int j = 0; y = new double[0,0]; alglib.ap.assert(n0>0, "RBFGridCalc2: invalid value for N0 (N0<=0)!"); alglib.ap.assert(n1>0, "RBFGridCalc2: invalid value for N1 (N1<=0)!"); alglib.ap.assert(alglib.ap.len(x0)>=n0, "RBFGridCalc2: Length(X0)=n1, "RBFGridCalc2: Length(X1)=4 || ((alglib.ap.len(x3)>=1 && (double)(x3[0])==(double)(0)) && n3==1), "RBFGridCalcVX: integrity check failed"); alglib.ap.assert(s.nx>=3 || ((alglib.ap.len(x2)>=1 && (double)(x2[0])==(double)(0)) && n2==1), "RBFGridCalcVX: integrity check failed"); alglib.ap.assert(s.nx>=2 || ((alglib.ap.len(x1)>=1 && (double)(x1[0])==(double)(0)) && n1==1), "RBFGridCalcVX: integrity check failed"); // // Allocate arrays // alglib.ap.assert(s.nx<=4, "RBFGridCalcVX: integrity check failed"); z = new double[ny]; tx = new double[4]; ty = new double[ny]; // // Calculate linear term // rowcnt = n1*n2*n3; for(rowidx=0; rowidx<=rowcnt-1; rowidx++) { // // Calculate TX - current position // k = rowidx; tx[0] = 0; tx[1] = x1[k%n1]; k = k/n1; tx[2] = x2[k%n2]; k = k/n2; tx[3] = x3[k%n3]; k = k/n3; alglib.ap.assert(k==0, "RBFGridCalcVX: integrity check failed"); for(j=0; j<=ny-1; j++) { v = s.v[j,nx]; for(k=1; k<=nx-1; k++) { v = v+tx[k]*s.v[j,k]; } z[j] = v; } for(i=0; i<=n0-1; i++) { dstoffs = ny*(rowidx*n0+i); if( sparsey && !flagy[rowidx*n0+i] ) { for(j=0; j<=ny-1; j++) { y[j+dstoffs] = 0; } continue; } v = x0[i]; for(j=0; j<=ny-1; j++) { y[j+dstoffs] = z[j]+v*s.v[j,0]; } } } if( s.nh==0 ) { return; } // // Process RBF terms, layer by layer // for(levelidx=0; levelidx<=s.nh-1; levelidx++) { rcur = s.ri[levelidx]; blockwidth0 = 1; blockwidth1 = 1; blockwidth2 = 1; blockwidth3 = 1; if( nx>=1 ) { blockwidth0 = rcur*s.s[0]; } if( nx>=2 ) { blockwidth1 = rcur*s.s[1]; } if( nx>=3 ) { blockwidth2 = rcur*s.s[2]; } if( nx>=4 ) { blockwidth3 = rcur*s.s[3]; } maxblocksize = 8; // // Group grid nodes into blocks according to current radius // blocks0 = new int[n0+1]; blockscnt0 = 0; blocks0[0] = 0; for(i=1; i<=n0-1; i++) { if( (double)(x0[i]-x0[blocks0[blockscnt0]])>(double)(blockwidth0) || i-blocks0[blockscnt0]>=maxblocksize ) { apserv.inc(ref blockscnt0, _params); blocks0[blockscnt0] = i; } } apserv.inc(ref blockscnt0, _params); blocks0[blockscnt0] = n0; blocks1 = new int[n1+1]; blockscnt1 = 0; blocks1[0] = 0; for(i=1; i<=n1-1; i++) { if( (double)(x1[i]-x1[blocks1[blockscnt1]])>(double)(blockwidth1) || i-blocks1[blockscnt1]>=maxblocksize ) { apserv.inc(ref blockscnt1, _params); blocks1[blockscnt1] = i; } } apserv.inc(ref blockscnt1, _params); blocks1[blockscnt1] = n1; blocks2 = new int[n2+1]; blockscnt2 = 0; blocks2[0] = 0; for(i=1; i<=n2-1; i++) { if( (double)(x2[i]-x2[blocks2[blockscnt2]])>(double)(blockwidth2) || i-blocks2[blockscnt2]>=maxblocksize ) { apserv.inc(ref blockscnt2, _params); blocks2[blockscnt2] = i; } } apserv.inc(ref blockscnt2, _params); blocks2[blockscnt2] = n2; blocks3 = new int[n3+1]; blockscnt3 = 0; blocks3[0] = 0; for(i=1; i<=n3-1; i++) { if( (double)(x3[i]-x3[blocks3[blockscnt3]])>(double)(blockwidth3) || 
i-blocks3[blockscnt3]>=maxblocksize ) { apserv.inc(ref blockscnt3, _params); blocks3[blockscnt3] = i; } } apserv.inc(ref blockscnt3, _params); blocks3[blockscnt3] = n3; // // Prepare seed for shared pool // allocatecalcbuffer(s, bufseedv2.calcbuf, _params); alglib.smp.ae_shared_pool_set_seed(bufpool, bufseedv2); // // Determine average number of neighbor per node // searchradius2 = math.sqr(rcur*rbfv2farradius(s.bf, _params)); ntrials = 100; avgfuncpernode = 0.0; for(i=0; i<=ntrials-1; i++) { tx[0] = x0[hqrnd.hqrnduniformi(rs, n0, _params)]; tx[1] = x1[hqrnd.hqrnduniformi(rs, n1, _params)]; tx[2] = x2[hqrnd.hqrnduniformi(rs, n2, _params)]; tx[3] = x3[hqrnd.hqrnduniformi(rs, n3, _params)]; preparepartialquery(tx, s.kdboxmin, s.kdboxmax, nx, bufseedv2.calcbuf, ref dummy, _params); avgfuncpernode = avgfuncpernode+(double)partialcountrec(s.kdnodes, s.kdsplits, s.cw, nx, ny, bufseedv2.calcbuf, s.kdroots[levelidx], searchradius2, tx, _params)/(double)ntrials; } // // Perform calculation in multithreaded mode // rbfv2partialgridcalcrec(s, x0, n0, x1, n1, x2, n2, x3, n3, blocks0, 0, blockscnt0, blocks1, 0, blockscnt1, blocks2, 0, blockscnt2, blocks3, 0, blockscnt3, flagy, sparsey, levelidx, avgfuncpernode, bufpool, y, _params); } } public static void rbfv2partialgridcalcrec(rbfv2model s, double[] x0, int n0, double[] x1, int n1, double[] x2, int n2, double[] x3, int n3, int[] blocks0, int block0a, int block0b, int[] blocks1, int block1a, int block1b, int[] blocks2, int block2a, int block2b, int[] blocks3, int block3a, int block3b, bool[] flagy, bool sparsey, int levelidx, double avgfuncpernode, alglib.smp.shared_pool bufpool, double[] y, alglib.xparams _params) { int nx = 0; int ny = 0; int k = 0; int l = 0; int blkidx = 0; int blkcnt = 0; int nodeidx = 0; int nodescnt = 0; int rowidx = 0; int rowscnt = 0; int i0 = 0; int i1 = 0; int i2 = 0; int i3 = 0; int j0 = 0; int j1 = 0; int j2 = 0; int j3 = 0; double rcur = 0; double invrc2 = 0; double rquery2 = 0; double rfar2 = 0; int dstoffs = 0; int srcoffs = 0; int dummy = 0; double rowwidth = 0; double maxrowwidth = 0; double problemcost = 0; int maxbs = 0; int midpoint = 0; bool emptyrow = new bool(); rbfv2gridcalcbuffer buf = null; nx = s.nx; ny = s.ny; // // Integrity checks // alglib.ap.assert(s.nx==2 || s.nx==3, "RBFV2PartialGridCalcRec: integrity check failed"); // // Try to split large problem // problemcost = s.ny*2*(avgfuncpernode+1); problemcost = problemcost*(blocks0[block0b]-blocks0[block0a]); problemcost = problemcost*(blocks1[block1b]-blocks1[block1a]); problemcost = problemcost*(blocks2[block2b]-blocks2[block2a]); problemcost = problemcost*(blocks3[block3b]-blocks3[block3a]); maxbs = 0; maxbs = Math.Max(maxbs, block0b-block0a); maxbs = Math.Max(maxbs, block1b-block1a); maxbs = Math.Max(maxbs, block2b-block2a); maxbs = Math.Max(maxbs, block3b-block3a); if( (double)(problemcost*complexitymultiplier)>=(double)(apserv.smpactivationlevel(_params)) ) { if( _trypexec_rbfv2partialgridcalcrec(s,x0,n0,x1,n1,x2,n2,x3,n3,blocks0,block0a,block0b,blocks1,block1a,block1b,blocks2,block2a,block2b,blocks3,block3a,block3b,flagy,sparsey,levelidx,avgfuncpernode,bufpool,y, _params) ) { return; } } if( (double)(problemcost*complexitymultiplier)>=(double)(apserv.spawnlevel(_params)) && maxbs>=2 ) { if( block0b-block0a==maxbs ) { midpoint = block0a+maxbs/2; rbfv2partialgridcalcrec(s, x0, n0, x1, n1, x2, n2, x3, n3, blocks0, block0a, midpoint, blocks1, block1a, block1b, blocks2, block2a, block2b, blocks3, block3a, block3b, flagy, sparsey, levelidx, 
avgfuncpernode, bufpool, y, _params); rbfv2partialgridcalcrec(s, x0, n0, x1, n1, x2, n2, x3, n3, blocks0, midpoint, block0b, blocks1, block1a, block1b, blocks2, block2a, block2b, blocks3, block3a, block3b, flagy, sparsey, levelidx, avgfuncpernode, bufpool, y, _params); return; } if( block1b-block1a==maxbs ) { midpoint = block1a+maxbs/2; rbfv2partialgridcalcrec(s, x0, n0, x1, n1, x2, n2, x3, n3, blocks0, block0a, block0b, blocks1, block1a, midpoint, blocks2, block2a, block2b, blocks3, block3a, block3b, flagy, sparsey, levelidx, avgfuncpernode, bufpool, y, _params); rbfv2partialgridcalcrec(s, x0, n0, x1, n1, x2, n2, x3, n3, blocks0, block0a, block0b, blocks1, midpoint, block1b, blocks2, block2a, block2b, blocks3, block3a, block3b, flagy, sparsey, levelidx, avgfuncpernode, bufpool, y, _params); return; } if( block2b-block2a==maxbs ) { midpoint = block2a+maxbs/2; rbfv2partialgridcalcrec(s, x0, n0, x1, n1, x2, n2, x3, n3, blocks0, block0a, block0b, blocks1, block1a, block1b, blocks2, block2a, midpoint, blocks3, block3a, block3b, flagy, sparsey, levelidx, avgfuncpernode, bufpool, y, _params); rbfv2partialgridcalcrec(s, x0, n0, x1, n1, x2, n2, x3, n3, blocks0, block0a, block0b, blocks1, block1a, block1b, blocks2, midpoint, block2b, blocks3, block3a, block3b, flagy, sparsey, levelidx, avgfuncpernode, bufpool, y, _params); return; } if( block3b-block3a==maxbs ) { midpoint = block3a+maxbs/2; rbfv2partialgridcalcrec(s, x0, n0, x1, n1, x2, n2, x3, n3, blocks0, block0a, block0b, blocks1, block1a, block1b, blocks2, block2a, block2b, blocks3, block3a, midpoint, flagy, sparsey, levelidx, avgfuncpernode, bufpool, y, _params); rbfv2partialgridcalcrec(s, x0, n0, x1, n1, x2, n2, x3, n3, blocks0, block0a, block0b, blocks1, block1a, block1b, blocks2, block2a, block2b, blocks3, midpoint, block3b, flagy, sparsey, levelidx, avgfuncpernode, bufpool, y, _params); return; } alglib.ap.assert(false, "RBFV2PartialGridCalcRec: integrity check failed"); } // // Retrieve buffer object from pool (it will be returned later) // alglib.smp.ae_shared_pool_retrieve(bufpool, ref buf); // // Calculate RBF model // alglib.ap.assert(nx<=4, "RBFV2PartialGridCalcRec: integrity check failed"); buf.tx = new double[4]; buf.cx = new double[4]; buf.ty = new double[ny]; rcur = s.ri[levelidx]; invrc2 = 1/(rcur*rcur); blkcnt = (block3b-block3a)*(block2b-block2a)*(block1b-block1a)*(block0b-block0a); for(blkidx=0; blkidx<=blkcnt-1; blkidx++) { // // Select block (I0,I1,I2,I3). // // NOTE: for problems with NX<4 corresponding I_? are zero. // k = blkidx; i0 = block0a+k%(block0b-block0a); k = k/(block0b-block0a); i1 = block1a+k%(block1b-block1a); k = k/(block1b-block1a); i2 = block2a+k%(block2b-block2a); k = k/(block2b-block2a); i3 = block3a+k%(block3b-block3a); k = k/(block3b-block3a); alglib.ap.assert(k==0, "RBFV2PartialGridCalcRec: integrity check failed"); // // We partitioned grid into blocks and selected block with // index (I0,I1,I2,I3). This block is a 4D cube (some dimensions // may be zero) of nodes with indexes (J0,J1,J2,J3), which is // further partitioned into a set of rows, each row corresponding // to indexes J1...J3 being fixed. // // We process block row by row, and each row may be handled // by either "generic" (nodes are processed separately) or // batch algorithm (that's the reason to use rows, after all). // // // Process nodes of the block // rowscnt = (blocks3[i3+1]-blocks3[i3])*(blocks2[i2+1]-blocks2[i2])*(blocks1[i1+1]-blocks1[i1]); for(rowidx=0; rowidx<=rowscnt-1; rowidx++) { // // Find out node indexes (*,J1,J2,J3). 
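//
// The decoding below treats RowIdx as a mixed-radix index: J1 varies
// fastest, then J2, then J3, with per-dimension widths equal to the
// sizes of the current block (Blocks1[I1+1]-Blocks1[I1], and so on).
// The assert(K==0) at the end verifies that RowIdx was within range.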
// // NOTE: for problems with NX<4 corresponding J_? are zero. // k = rowidx; j1 = blocks1[i1]+k%(blocks1[i1+1]-blocks1[i1]); k = k/(blocks1[i1+1]-blocks1[i1]); j2 = blocks2[i2]+k%(blocks2[i2+1]-blocks2[i2]); k = k/(blocks2[i2+1]-blocks2[i2]); j3 = blocks3[i3]+k%(blocks3[i3+1]-blocks3[i3]); k = k/(blocks3[i3+1]-blocks3[i3]); alglib.ap.assert(k==0, "RBFV2PartialGridCalcRec: integrity check failed"); // // Analyze row, skip completely empty rows // nodescnt = blocks0[i0+1]-blocks0[i0]; srcoffs = blocks0[i0]+(j1+(j2+j3*n2)*n1)*n0; emptyrow = true; for(nodeidx=0; nodeidx<=nodescnt-1; nodeidx++) { emptyrow = emptyrow && (sparsey && !flagy[srcoffs+nodeidx]); } if( emptyrow ) { continue; } // // Process row - use either "batch" (rowsize>1) or "generic" // (row size is 1) algorithm. // // NOTE: "generic" version may also be used as fallback code for // situations when we do not want to use batch code. // maxrowwidth = 0.5*rbfv2nearradius(s.bf, _params)*rcur*s.s[0]; rowwidth = x0[blocks0[i0+1]-1]-x0[blocks0[i0]]; if( nodescnt>1 && (double)(rowwidth)<=(double)(maxrowwidth) ) { // // "Batch" code which processes entire row at once, saving // some time in kd-tree search code. // rquery2 = math.sqr(rcur*rbfv2farradius(s.bf, _params)+0.5*rowwidth/s.s[0]); rfar2 = math.sqr(rcur*rbfv2farradius(s.bf, _params)); j0 = blocks0[i0]; if( nx>0 ) { buf.cx[0] = (x0[j0]+0.5*rowwidth)/s.s[0]; } if( nx>1 ) { buf.cx[1] = x1[j1]/s.s[1]; } if( nx>2 ) { buf.cx[2] = x2[j2]/s.s[2]; } if( nx>3 ) { buf.cx[3] = x3[j3]/s.s[3]; } srcoffs = j0+(j1+(j2+j3*n2)*n1)*n0; dstoffs = ny*srcoffs; apserv.rvectorsetlengthatleast(ref buf.rx, nodescnt, _params); apserv.bvectorsetlengthatleast(ref buf.rf, nodescnt, _params); apserv.rvectorsetlengthatleast(ref buf.ry, nodescnt*ny, _params); for(nodeidx=0; nodeidx<=nodescnt-1; nodeidx++) { buf.rx[nodeidx] = x0[j0+nodeidx]/s.s[0]; buf.rf[nodeidx] = !sparsey || flagy[srcoffs+nodeidx]; } for(k=0; k<=nodescnt*ny-1; k++) { buf.ry[k] = 0; } preparepartialquery(buf.cx, s.kdboxmin, s.kdboxmax, nx, buf.calcbuf, ref dummy, _params); partialrowcalcrec(s, buf.calcbuf, s.kdroots[levelidx], invrc2, rquery2, rfar2, buf.cx, buf.rx, buf.rf, nodescnt, buf.ry, _params); for(k=0; k<=nodescnt*ny-1; k++) { y[dstoffs+k] = y[dstoffs+k]+buf.ry[k]; } } else { // // "Generic" code. Although we usually move here // only when NodesCnt=1, we still use a loop on // NodeIdx just to be able to use this branch as // fallback code without any modifications. // rquery2 = math.sqr(rcur*rbfv2farradius(s.bf, _params)); for(nodeidx=0; nodeidx<=nodescnt-1; nodeidx++) { // // Prepare TX - current point // j0 = blocks0[i0]+nodeidx; if( nx>0 ) { buf.tx[0] = x0[j0]/s.s[0]; } if( nx>1 ) { buf.tx[1] = x1[j1]/s.s[1]; } if( nx>2 ) { buf.tx[2] = x2[j2]/s.s[2]; } if( nx>3 ) { buf.tx[3] = x3[j3]/s.s[3]; } // // Evaluate and add to Y // srcoffs = j0+(j1+(j2+j3*n2)*n1)*n0; dstoffs = ny*srcoffs; for(l=0; l<=ny-1; l++) { buf.ty[l] = 0; } if( !sparsey || flagy[srcoffs] ) { preparepartialquery(buf.tx, s.kdboxmin, s.kdboxmax, nx, buf.calcbuf, ref dummy, _params); partialcalcrec(s, buf.calcbuf, s.kdroots[levelidx], invrc2, rquery2, buf.tx, buf.ty, _params); } for(l=0; l<=ny-1; l++) { y[dstoffs+l] = y[dstoffs+l]+buf.ty[l]; } } } } } // // Recycle buffer object back to pool // alglib.smp.ae_shared_pool_recycle(bufpool, ref buf); } /************************************************************************* Serial stub for GPL edition. 
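NOTE: this stub always returns False, so in the GPL edition
      RBFV2PartialGridCalcRec() above always falls through to the
      purely sequential recursive splitting. Commercial SMP-capable
      editions are expected to substitute a variant which hands the
      call to a worker thread instead.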
*************************************************************************/ public static bool _trypexec_rbfv2partialgridcalcrec(rbfv2model s, double[] x0, int n0, double[] x1, int n1, double[] x2, int n2, double[] x3, int n3, int[] blocks0, int block0a, int block0b, int[] blocks1, int block1a, int block1b, int[] blocks2, int block2a, int block2b, int[] blocks3, int block3a, int block3b, bool[] flagy, bool sparsey, int levelidx, double avgfuncpernode, alglib.smp.shared_pool bufpool, double[] y, alglib.xparams _params) { return false; } /************************************************************************* This function "unpacks" RBF model by extracting its coefficients. INPUT PARAMETERS: S - RBF model OUTPUT PARAMETERS: NX - dimensionality of argument NY - dimensionality of the target function XWR - model information, array[NC,NX+NY+1]. One row of the array corresponds to one basis function: * first NX columns - coordinates of the center * next NY columns - weights, one per dimension of the function being modelled * last NX columns - radii, per dimension NC - number of the centers V - polynomial term , array[NY,NX+1]. One row per one dimension of the function being modelled. First NX elements are linear coefficients, V[NX] is equal to the constant part. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfv2unpack(rbfv2model s, ref int nx, ref int ny, ref double[,] xwr, ref int nc, ref double[,] v, alglib.xparams _params) { int i = 0; int ncactual = 0; int i_ = 0; nx = 0; ny = 0; xwr = new double[0,0]; nc = 0; v = new double[0,0]; nx = s.nx; ny = s.ny; nc = 0; // // Fill V // v = new double[s.ny, s.nx+1]; for(i=0; i<=s.ny-1; i++) { for(i_=0; i_<=s.nx;i_++) { v[i,i_] = s.v[i,i_]; } } // // Fill XWR // alglib.ap.assert(alglib.ap.len(s.cw)%(s.nx+s.ny)==0, "RBFV2Unpack: integrity error"); nc = alglib.ap.len(s.cw)/(s.nx+s.ny); ncactual = 0; if( nc>0 ) { xwr = new double[nc, s.nx+s.ny+s.nx]; for(i=0; i<=s.nh-1; i++) { partialunpackrec(s.kdnodes, s.kdsplits, s.cw, s.s, s.nx, s.ny, s.kdroots[i], s.ri[i], xwr, ref ncactual, _params); } } alglib.ap.assert(nc==ncactual, "RBFV2Unpack: integrity error"); } private static bool rbfv2buildlinearmodel(double[,] x, ref double[,] y, int n, int nx, int ny, int modeltype, ref double[,] v, alglib.xparams _params) { bool result = new bool(); double[] tmpy = new double[0]; double[,] a = new double[0,0]; double scaling = 0; double[] shifting = new double[0]; double mn = 0; double mx = 0; double[] c = new double[0]; lsfit.lsfitreport rep = new lsfit.lsfitreport(); int i = 0; int j = 0; int k = 0; int info = 0; v = new double[0,0]; alglib.ap.assert(n>=0, "BuildLinearModel: N<0"); alglib.ap.assert(nx>0, "BuildLinearModel: NX<=0"); alglib.ap.assert(ny>0, "BuildLinearModel: NY<=0"); // // Handle degenerate case (N=0) // result = true; v = new double[ny, nx+1]; if( n==0 ) { for(j=0; j<=nx; j++) { for(i=0; i<=ny-1; i++) { v[i,j] = 0; } } return result; } // // Allocate temporaries // tmpy = new double[n]; // // General linear model. 
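//
// The linear term is fitted in normalized variables in order to
// improve conditioning of the least squares problem:
// * each coordinate is shifted to its mid-range and divided by a
//   common scale, A[i,j] = (X[i,j]-Shifting[j])/Scaling, with a
//   trailing column of ones for the constant term
// * LSFitLinear() is called once per output dimension
// * coefficients are mapped back to original variables as
//   V[i,j] = C[j]/Scaling and V[i,NX] = C[NX] - sum_j Shifting[j]*V[i,j]
// * finally the linear trend is subtracted from Y, so that subsequent
//   RBF construction fits only the residual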
// if( modeltype==1 ) { // // Calculate scaling/shifting, transform variables, prepare LLS problem // a = new double[n, nx+1]; shifting = new double[nx]; scaling = 0; for(i=0; i<=nx-1; i++) { mn = x[0,i]; mx = mn; for(j=1; j<=n-1; j++) { if( (double)(mn)>(double)(x[j,i]) ) { mn = x[j,i]; } if( (double)(mx)<(double)(x[j,i]) ) { mx = x[j,i]; } } scaling = Math.Max(scaling, mx-mn); shifting[i] = 0.5*(mx+mn); } if( (double)(scaling)==(double)(0) ) { scaling = 1; } else { scaling = 0.5*scaling; } for(i=0; i<=n-1; i++) { for(j=0; j<=nx-1; j++) { a[i,j] = (x[i,j]-shifting[j])/scaling; } } for(i=0; i<=n-1; i++) { a[i,nx] = 1; } // // Solve linear system in transformed variables, make backward // for(i=0; i<=ny-1; i++) { for(j=0; j<=n-1; j++) { tmpy[j] = y[j,i]; } lsfit.lsfitlinear(tmpy, a, n, nx+1, ref info, ref c, rep, _params); if( info<=0 ) { result = false; return result; } for(j=0; j<=nx-1; j++) { v[i,j] = c[j]/scaling; } v[i,nx] = c[nx]; for(j=0; j<=nx-1; j++) { v[i,nx] = v[i,nx]-shifting[j]*v[i,j]; } for(j=0; j<=n-1; j++) { for(k=0; k<=nx-1; k++) { y[j,i] = y[j,i]-x[j,k]*v[i,k]; } y[j,i] = y[j,i]-v[i,nx]; } } return result; } // // Constant model, very simple // if( modeltype==2 ) { for(i=0; i<=ny-1; i++) { for(j=0; j<=nx; j++) { v[i,j] = 0; } for(j=0; j<=n-1; j++) { v[i,nx] = v[i,nx]+y[j,i]; } if( n>0 ) { v[i,nx] = v[i,nx]/n; } for(j=0; j<=n-1; j++) { y[j,i] = y[j,i]-v[i,nx]; } } return result; } // // Zero model // alglib.ap.assert(modeltype==3, "BuildLinearModel: unknown model type"); for(i=0; i<=ny-1; i++) { for(j=0; j<=nx; j++) { v[i,j] = 0; } } return result; } /************************************************************************* Reallocates calcBuf if necessary, reuses previously allocated space if possible. -- ALGLIB -- Copyright 20.06.2016 by Sergey Bochkanov *************************************************************************/ private static void allocatecalcbuffer(rbfv2model s, rbfv2calcbuffer buf, alglib.xparams _params) { if( alglib.ap.len(buf.x)=localnodessize+2, "ConvertTreeRec: integrity check failed"); alglib.ap.assert(alglib.ap.len(localcw)>=localcwsize+cnt*(nx+ny), "ConvertTreeRec: integrity check failed"); localnodes[localnodessize+0] = cnt; localnodes[localnodessize+1] = cwbase+localcwsize; localnodessize = localnodessize+2; for(i=0; i<=cnt-1; i++) { for(j=0; j<=nx+ny-1; j++) { localcw[localcwsize+i*(nx+ny)+j] = xybuf[i,j]; } } localcwsize = localcwsize+cnt*(nx+ny); return; } // // Split node // if( nodetype==1 ) { nearestneighbor.kdtreeexploresplit(curtree, nodeoffset, ref d, ref s, ref nodele, ref nodege, _params); alglib.ap.assert(alglib.ap.len(localnodes)>=localnodessize+maxnodesize, "ConvertTreeRec: integrity check failed"); alglib.ap.assert(alglib.ap.len(localsplits)>=localsplitssize+1, "ConvertTreeRec: integrity check failed"); oldnodessize = localnodessize; localnodes[localnodessize+0] = 0; localnodes[localnodessize+1] = d; localnodes[localnodessize+2] = splitsbase+localsplitssize; localnodes[localnodessize+3] = -1; localnodes[localnodessize+4] = -1; localnodessize = localnodessize+5; localsplits[localsplitssize+0] = s; localsplitssize = localsplitssize+1; localnodes[oldnodessize+3] = nodesbase+localnodessize; converttreerec(curtree, n, nx, ny, nodele, nodesbase, splitsbase, cwbase, localnodes, ref localnodessize, localsplits, ref localsplitssize, localcw, ref localcwsize, ref xybuf, _params); localnodes[oldnodessize+4] = nodesbase+localnodessize; converttreerec(curtree, n, nx, ny, nodege, nodesbase, splitsbase, cwbase, localnodes, ref localnodessize, 
localsplits, ref localsplitssize, localcw, ref localcwsize, ref xybuf, _params); return; } // // Integrity error // alglib.ap.assert(false, "ConvertTreeRec: integrity check failed"); } /************************************************************************* This function performs partial calculation of hierarchical model: given evaluation point X and partially computed value Y, it updates Y by values computed using part of multi-tree given by RootIdx. INPUT PARAMETERS: S - V2 model Buf - calc-buffer, this function uses following fields: * Buf.CurBoxMin - should be set by caller * Buf.CurBoxMax - should be set by caller * Buf.CurDist2 - squared distance from X to current bounding box, should be set by caller RootIdx - offset of partial kd-tree InvR2 - 1/R^2, where R is basis function radius QueryR2 - squared query radius, usually it is (R*FarRadius(BasisFunction))^2 X - evaluation point, array[NX] Y - partial value, array[NY] OUTPUT PARAMETERS Y - updated partial value -- ALGLIB -- Copyright 20.06.2016 by Bochkanov Sergey *************************************************************************/ private static void partialcalcrec(rbfv2model s, rbfv2calcbuffer buf, int rootidx, double invr2, double queryr2, double[] x, double[] y, alglib.xparams _params) { int i = 0; int j = 0; double ptdist2 = 0; double v = 0; double v0 = 0; double v1 = 0; int cwoffs = 0; int cwcnt = 0; int itemoffs = 0; double arg = 0; double val = 0; int d = 0; double split = 0; int childle = 0; int childge = 0; int childoffs = 0; bool updatemin = new bool(); double prevdist2 = 0; double t1 = 0; int nx = 0; int ny = 0; nx = s.nx; ny = s.ny; // // Helps to avoid spurious warnings // val = 0; // // Leaf node. // if( s.kdnodes[rootidx]>0 ) { cwcnt = s.kdnodes[rootidx+0]; cwoffs = s.kdnodes[rootidx+1]; for(i=0; i<=cwcnt-1; i++) { // // Calculate distance // itemoffs = cwoffs+i*(nx+ny); ptdist2 = 0; for(j=0; j<=nx-1; j++) { v = s.cw[itemoffs+j]-x[j]; ptdist2 = ptdist2+v*v; } // // Skip points if distance too large // if( ptdist2>=queryr2 ) { continue; } // // Update Y // arg = ptdist2*invr2; if( s.bf==0 ) { val = Math.Exp(-arg); } else { if( s.bf==1 ) { val = rbfv2basisfunc(s.bf, arg, _params); } else { alglib.ap.assert(false, "PartialCalcRec: integrity check failed"); } } itemoffs = itemoffs+nx; for(j=0; j<=ny-1; j++) { y[j] = y[j]+val*s.cw[itemoffs+j]; } } return; } // // Simple split // if( s.kdnodes[rootidx]==0 ) { // // Load: // * D dimension to split // * Split split position // * ChildLE, ChildGE - indexes of childs // d = s.kdnodes[rootidx+1]; split = s.kdsplits[s.kdnodes[rootidx+2]]; childle = s.kdnodes[rootidx+3]; childge = s.kdnodes[rootidx+4]; // // Navigate through childs // for(i=0; i<=1; i++) { // // Select child to process: // * ChildOffs current child offset in Nodes[] // * UpdateMin whether minimum or maximum value // of bounding box is changed on update // updatemin = i!=0; if( i==0 ) { childoffs = childle; } else { childoffs = childge; } // // Update bounding box and current distance // prevdist2 = buf.curdist2; t1 = x[d]; if( updatemin ) { v = buf.curboxmin[d]; if( t1<=split ) { v0 = v-t1; if( v0<0 ) { v0 = 0; } v1 = split-t1; buf.curdist2 = buf.curdist2-v0*v0+v1*v1; } buf.curboxmin[d] = split; } else { v = buf.curboxmax[d]; if( t1>=split ) { v0 = t1-v; if( v0<0 ) { v0 = 0; } v1 = t1-split; buf.curdist2 = buf.curdist2-v0*v0+v1*v1; } buf.curboxmax[d] = split; } // // Decide: to dive into cell or not to dive // if( buf.curdist20 ) { cwcnt = s.kdnodes[rootidx+0]; cwoffs = s.kdnodes[rootidx+1]; for(i0=0; 
i0<=cwcnt-1; i0++) { // // Calculate partial distance (components from 1 to NX-1) // itemoffs = cwoffs+i0*(nx+ny); partialptdist2 = 0; for(j=1; j<=nx-1; j++) { v = s.cw[itemoffs+j]-cx[j]; partialptdist2 = partialptdist2+v*v; } // // Process each element of the row // for(i1=0; i1<=rowsize-1; i1++) { if( rf[i1] ) { // // Calculate distance // v = s.cw[itemoffs]-rx[i1]; ptdist2 = partialptdist2+v*v; // // Skip points if distance too large // if( ptdist2>=rfar2 ) { continue; } // // Update Y // val = rbfv2basisfunc(s.bf, ptdist2*invr2, _params); woffs = itemoffs+nx; for(j=0; j<=ny-1; j++) { ry[j+i1*ny] = ry[j+i1*ny]+val*s.cw[woffs+j]; } } } } return; } // // Simple split // if( s.kdnodes[rootidx]==0 ) { // // Load: // * D dimension to split // * Split split position // * ChildLE, ChildGE - indexes of childs // d = s.kdnodes[rootidx+1]; split = s.kdsplits[s.kdnodes[rootidx+2]]; childle = s.kdnodes[rootidx+3]; childge = s.kdnodes[rootidx+4]; // // Navigate through childs // for(i=0; i<=1; i++) { // // Select child to process: // * ChildOffs current child offset in Nodes[] // * UpdateMin whether minimum or maximum value // of bounding box is changed on update // updatemin = i!=0; if( i==0 ) { childoffs = childle; } else { childoffs = childge; } // // Update bounding box and current distance // prevdist2 = buf.curdist2; t1 = cx[d]; if( updatemin ) { v = buf.curboxmin[d]; if( t1<=split ) { v0 = v-t1; if( v0<0 ) { v0 = 0; } v1 = split-t1; buf.curdist2 = buf.curdist2-v0*v0+v1*v1; } buf.curboxmin[d] = split; } else { v = buf.curboxmax[d]; if( t1>=split ) { v0 = t1-v; if( v0<0 ) { v0 = 0; } v1 = t1-split; buf.curdist2 = buf.curdist2-v0*v0+v1*v1; } buf.curboxmax[d] = split; } // // Decide: to dive into cell or not to dive // if( buf.curdist2(double)(buf.curboxmax[j]) ) { buf.curdist2 = buf.curdist2+math.sqr(x[j]-buf.curboxmax[j]); } } } } /************************************************************************* This function performs partial (for just one subtree of multi-tree) query for neighbors located in R-sphere around X. It returns squared distances from X to points and offsets in S.CW[] array for points being found. INPUT PARAMETERS: kdNodes, kdSplits, CW, NX, NY - corresponding fields of V2 model Buf - calc-buffer, this function uses following fields: * Buf.CurBoxMin - should be set by caller * Buf.CurBoxMax - should be set by caller * Buf.CurDist2 - squared distance from X to current bounding box, should be set by caller You may use preparepartialquery() function to initialize these fields. RootIdx - offset of partial kd-tree QueryR2 - squared query radius X - array[NX], point being queried R2 - preallocated output buffer; it is caller's responsibility to make sure that R2 has enough space. Offs - preallocated output buffer; it is caller's responsibility to make sure that Offs has enough space. K - MUST BE ZERO ON INITIAL CALL. This variable is incremented, not set. So, any no-zero value will result in the incorrect points count being returned. 
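NOTE: the call sequence used by DesignMatrixGenerateRow() in this unit
      is a convenient reference for how this function is driven:

          preparepartialquery(x0, kdboxmin, kdboxmax, nx, calcbuf, ref cnt, _params);
          partialqueryrec(kdnodes, kdsplits, cw, nx, ny, calcbuf,
                          kdroots[levelidx], curradius2, x0,
                          tmpr2, tmpoffs, ref cnt, _params);

      with Cnt expected to be zero at the moment PartialQueryRec() is
      entered, per the requirement on K stated above.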
OUTPUT PARAMETERS R2 - squared distances in first K elements Offs - offsets in S.CW in first K elements K - points count -- ALGLIB -- Copyright 20.06.2016 by Bochkanov Sergey *************************************************************************/ private static void partialqueryrec(int[] kdnodes, double[] kdsplits, double[] cw, int nx, int ny, rbfv2calcbuffer buf, int rootidx, double queryr2, double[] x, double[] r2, int[] offs, ref int k, alglib.xparams _params) { int i = 0; int j = 0; double ptdist2 = 0; double v = 0; int cwoffs = 0; int cwcnt = 0; int itemoffs = 0; int d = 0; double split = 0; int childle = 0; int childge = 0; int childoffs = 0; bool updatemin = new bool(); double prevdist2 = 0; double t1 = 0; // // Leaf node. // if( kdnodes[rootidx]>0 ) { cwcnt = kdnodes[rootidx+0]; cwoffs = kdnodes[rootidx+1]; for(i=0; i<=cwcnt-1; i++) { // // Calculate distance // itemoffs = cwoffs+i*(nx+ny); ptdist2 = 0; for(j=0; j<=nx-1; j++) { v = cw[itemoffs+j]-x[j]; ptdist2 = ptdist2+v*v; } // // Skip points if distance too large // if( (double)(ptdist2)>=(double)(queryr2) ) { continue; } // // Output // r2[k] = ptdist2; offs[k] = itemoffs; k = k+1; } return; } // // Simple split // if( kdnodes[rootidx]==0 ) { // // Load: // * D dimension to split // * Split split position // * ChildLE, ChildGE - indexes of childs // d = kdnodes[rootidx+1]; split = kdsplits[kdnodes[rootidx+2]]; childle = kdnodes[rootidx+3]; childge = kdnodes[rootidx+4]; // // Navigate through childs // for(i=0; i<=1; i++) { // // Select child to process: // * ChildOffs current child offset in Nodes[] // * UpdateMin whether minimum or maximum value // of bounding box is changed on update // updatemin = i!=0; if( i==0 ) { childoffs = childle; } else { childoffs = childge; } // // Update bounding box and current distance // prevdist2 = buf.curdist2; t1 = x[d]; if( updatemin ) { v = buf.curboxmin[d]; if( (double)(t1)<=(double)(split) ) { buf.curdist2 = buf.curdist2-math.sqr(Math.Max(v-t1, 0))+math.sqr(split-t1); } buf.curboxmin[d] = split; } else { v = buf.curboxmax[d]; if( (double)(t1)>=(double)(split) ) { buf.curdist2 = buf.curdist2-math.sqr(Math.Max(t1-v, 0))+math.sqr(t1-split); } buf.curboxmax[d] = split; } // // Decide: to dive into cell or not to dive // if( (double)(buf.curdist2)<(double)(queryr2) ) { partialqueryrec(kdnodes, kdsplits, cw, nx, ny, buf, childoffs, queryr2, x, r2, offs, ref k, _params); } // // Restore bounding box and distance // if( updatemin ) { buf.curboxmin[d] = v; } else { buf.curboxmax[d] = v; } buf.curdist2 = prevdist2; } return; } // // Integrity failure // alglib.ap.assert(false, "PartialQueryRec: integrity check failed"); } /************************************************************************* This function performs partial (for just one subtree of multi-tree) counting of neighbors located in R-sphere around X. This function does not guarantee consistency of results with other partial queries, it should be used only to get approximate estimates (well, we do not use approximate algorithms, but rounding errors may give us inconsistent results in just-at-the-boundary cases). INPUT PARAMETERS: kdNodes, kdSplits, CW, NX, NY - corresponding fields of V2 model Buf - calc-buffer, this function uses following fields: * Buf.CurBoxMin - should be set by caller * Buf.CurBoxMax - should be set by caller * Buf.CurDist2 - squared distance from X to current bounding box, should be set by caller You may use preparepartialquery() function to initialize these fields. 
RootIdx - offset of partial kd-tree QueryR2 - squared query radius X - array[NX], point being queried RESULT: points count -- ALGLIB -- Copyright 20.06.2016 by Bochkanov Sergey *************************************************************************/ private static int partialcountrec(int[] kdnodes, double[] kdsplits, double[] cw, int nx, int ny, rbfv2calcbuffer buf, int rootidx, double queryr2, double[] x, alglib.xparams _params) { int result = 0; int i = 0; int j = 0; double ptdist2 = 0; double v = 0; int cwoffs = 0; int cwcnt = 0; int itemoffs = 0; int d = 0; double split = 0; int childle = 0; int childge = 0; int childoffs = 0; bool updatemin = new bool(); double prevdist2 = 0; double t1 = 0; result = 0; // // Leaf node. // if( kdnodes[rootidx]>0 ) { cwcnt = kdnodes[rootidx+0]; cwoffs = kdnodes[rootidx+1]; for(i=0; i<=cwcnt-1; i++) { // // Calculate distance // itemoffs = cwoffs+i*(nx+ny); ptdist2 = 0; for(j=0; j<=nx-1; j++) { v = cw[itemoffs+j]-x[j]; ptdist2 = ptdist2+v*v; } // // Skip points if distance too large // if( (double)(ptdist2)>=(double)(queryr2) ) { continue; } // // Output // result = result+1; } return result; } // // Simple split // if( kdnodes[rootidx]==0 ) { // // Load: // * D dimension to split // * Split split position // * ChildLE, ChildGE - indexes of childs // d = kdnodes[rootidx+1]; split = kdsplits[kdnodes[rootidx+2]]; childle = kdnodes[rootidx+3]; childge = kdnodes[rootidx+4]; // // Navigate through childs // for(i=0; i<=1; i++) { // // Select child to process: // * ChildOffs current child offset in Nodes[] // * UpdateMin whether minimum or maximum value // of bounding box is changed on update // updatemin = i!=0; if( i==0 ) { childoffs = childle; } else { childoffs = childge; } // // Update bounding box and current distance // prevdist2 = buf.curdist2; t1 = x[d]; if( updatemin ) { v = buf.curboxmin[d]; if( (double)(t1)<=(double)(split) ) { buf.curdist2 = buf.curdist2-math.sqr(Math.Max(v-t1, 0))+math.sqr(split-t1); } buf.curboxmin[d] = split; } else { v = buf.curboxmax[d]; if( (double)(t1)>=(double)(split) ) { buf.curdist2 = buf.curdist2-math.sqr(Math.Max(t1-v, 0))+math.sqr(t1-split); } buf.curboxmax[d] = split; } // // Decide: to dive into cell or not to dive // if( (double)(buf.curdist2)<(double)(queryr2) ) { result = result+partialcountrec(kdnodes, kdsplits, cw, nx, ny, buf, childoffs, queryr2, x, _params); } // // Restore bounding box and distance // if( updatemin ) { buf.curboxmin[d] = v; } else { buf.curboxmax[d] = v; } buf.curdist2 = prevdist2; } return result; } // // Integrity failure // alglib.ap.assert(false, "PartialCountRec: integrity check failed"); return result; } /************************************************************************* This function performs partial (for just one subtree of multi-tree) unpack for RBF model. It appends center coordinates, weights and per-dimension radii (according to current scaling) to preallocated output array. INPUT PARAMETERS: kdNodes, kdSplits, CW, S, NX, NY - corresponding fields of V2 model RootIdx - offset of partial kd-tree R - radius for current partial tree XWR - preallocated output buffer; it is caller's responsibility to make sure that XWR has enough space. First K rows are already occupied. K - number of already occupied rows in XWR. 
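NOTE: rows appended by this function follow the XWR layout documented
      in RBFV2Unpack(): first NX columns store the center (already
      multiplied by the per-dimension scales S[]), next NY columns
      store the weights, and the last NX columns store per-dimension
      radii R*S[j]. A caller reading a row back would therefore use
      (illustrative sketch only):

          // center:  xwr[k, 0 .. nx-1]
          // weights: xwr[k, nx .. nx+ny-1]
          // radii:   xwr[k, nx+ny .. nx+ny+nx-1]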
OUTPUT PARAMETERS XWR - updated XWR K - updated rows count -- ALGLIB -- Copyright 20.06.2016 by Bochkanov Sergey *************************************************************************/ private static void partialunpackrec(int[] kdnodes, double[] kdsplits, double[] cw, double[] s, int nx, int ny, int rootidx, double r, double[,] xwr, ref int k, alglib.xparams _params) { int i = 0; int j = 0; int childle = 0; int childge = 0; int itemoffs = 0; int cwoffs = 0; int cwcnt = 0; // // Leaf node. // if( kdnodes[rootidx]>0 ) { cwcnt = kdnodes[rootidx+0]; cwoffs = kdnodes[rootidx+1]; for(i=0; i<=cwcnt-1; i++) { itemoffs = cwoffs+i*(nx+ny); for(j=0; j<=nx+ny-1; j++) { xwr[k,j] = cw[itemoffs+j]; } for(j=0; j<=nx-1; j++) { xwr[k,j] = xwr[k,j]*s[j]; } for(j=0; j<=nx-1; j++) { xwr[k,nx+ny+j] = r*s[j]; } k = k+1; } return; } // // Simple split // if( kdnodes[rootidx]==0 ) { // // Load: // * ChildLE, ChildGE - indexes of childs // childle = kdnodes[rootidx+3]; childge = kdnodes[rootidx+4]; // // Process both parts of split // partialunpackrec(kdnodes, kdsplits, cw, s, nx, ny, childle, r, xwr, ref k, _params); partialunpackrec(kdnodes, kdsplits, cw, s, nx, ny, childge, r, xwr, ref k, _params); return; } // // Integrity failure // alglib.ap.assert(false, "PartialUnpackRec: integrity check failed"); } /************************************************************************* This function returns size of design matrix row for evaluation point X0, given: * query radius multiplier (either RBFV2NearRadius() or RBFV2FarRadius()) * hierarchy level: value in [0,NH) for single-level model, or negative value for multilevel model (all levels of hierarchy in single matrix, like one used by nonnegative RBF) INPUT PARAMETERS: kdNodes, kdSplits, CW, Ri, kdRoots, kdBoxMin, kdBoxMax, NX, NY, NH - corresponding fields of V2 model Level - value in [0,NH) for single-level design matrix, negative value for multilevel design matrix RCoeff - radius coefficient, either RBFV2NearRadius() or RBFV2FarRadius() X0 - query point CalcBuf - buffer for PreparePartialQuery(), allocated by caller RESULT: row size -- ALGLIB -- Copyright 28.09.2016 by Bochkanov Sergey *************************************************************************/ private static int designmatrixrowsize(int[] kdnodes, double[] kdsplits, double[] cw, double[] ri, int[] kdroots, double[] kdboxmin, double[] kdboxmax, int nx, int ny, int nh, int level, double rcoeff, double[] x0, rbfv2calcbuffer calcbuf, alglib.xparams _params) { int result = 0; int dummy = 0; int levelidx = 0; int level0 = 0; int level1 = 0; double curradius2 = 0; alglib.ap.assert(nh>0, "DesignMatrixRowSize: integrity failure"); if( level>=0 ) { level0 = level; level1 = level; } else { level0 = 0; level1 = nh-1; } result = 0; for(levelidx=level0; levelidx<=level1; levelidx++) { curradius2 = math.sqr(ri[levelidx]*rcoeff); preparepartialquery(x0, kdboxmin, kdboxmax, nx, calcbuf, ref dummy, _params); result = result+partialcountrec(kdnodes, kdsplits, cw, nx, ny, calcbuf, kdroots[levelidx], curradius2, x0, _params); } return result; } /************************************************************************* This function generates design matrix row for evaluation point X0, given: * query radius multiplier (either RBFV2NearRadius() or RBFV2FarRadius()) * hierarchy level: value in [0,NH) for single-level model, or negative value for multilevel model (all levels of hierarchy in single matrix, like one used by nonnegative RBF) INPUT PARAMETERS: kdNodes, kdSplits, CW, Ri, kdRoots, kdBoxMin, kdBoxMax, NX, NY, 
NH - corresponding fields of V2 model CWRange - internal array[NH+1] used by RBF construction function, stores ranges of CW occupied by NH trees. Level - value in [0,NH) for single-level design matrix, negative value for multilevel design matrix BF - basis function type RCoeff - radius coefficient, either RBFV2NearRadius() or RBFV2FarRadius() RowsPerPoint-equal to: * 1 for unpenalized regression model * 1+NX for basic form of nonsmoothness penalty Penalty - nonsmoothness penalty coefficient X0 - query point CalcBuf - buffer for PreparePartialQuery(), allocated by caller R2 - preallocated temporary buffer, size is at least NPoints; it is caller's responsibility to make sure that R2 has enough space. Offs - preallocated temporary buffer; size is at least NPoints; it is caller's responsibility to make sure that Offs has enough space. K - MUST BE ZERO ON INITIAL CALL. This variable is incremented, not set. So, any no-zero value will result in the incorrect points count being returned. RowIdx - preallocated array, at least RowSize elements RowVal - preallocated array, at least RowSize*RowsPerPoint elements RESULT: RowIdx - RowSize elements are filled with column indexes of non-zero design matrix entries RowVal - RowSize*RowsPerPoint elements are filled with design matrix values, with column RowIdx[0] being stored in first RowsPerPoint elements of RowVal, column RowIdx[1] being stored in next RowsPerPoint elements, and so on. First element in contiguous set of RowsPerPoint elements corresponds to RowSize - number of columns per row -- ALGLIB -- Copyright 28.09.2016 by Bochkanov Sergey *************************************************************************/ private static void designmatrixgeneraterow(int[] kdnodes, double[] kdsplits, double[] cw, double[] ri, int[] kdroots, double[] kdboxmin, double[] kdboxmax, int[] cwrange, int nx, int ny, int nh, int level, int bf, double rcoeff, int rowsperpoint, double penalty, double[] x0, rbfv2calcbuffer calcbuf, double[] tmpr2, int[] tmpoffs, int[] rowidx, double[] rowval, ref int rowsize, alglib.xparams _params) { int j = 0; int k = 0; int cnt = 0; int levelidx = 0; int level0 = 0; int level1 = 0; double invri2 = 0; double curradius2 = 0; double val = 0; double dval = 0; double d2val = 0; rowsize = 0; alglib.ap.assert(nh>0, "DesignMatrixGenerateRow: integrity failure (a)"); alglib.ap.assert(rowsperpoint==1 || rowsperpoint==1+nx, "DesignMatrixGenerateRow: integrity failure (b)"); if( level>=0 ) { level0 = level; level1 = level; } else { level0 = 0; level1 = nh-1; } rowsize = 0; for(levelidx=level0; levelidx<=level1; levelidx++) { curradius2 = math.sqr(ri[levelidx]*rcoeff); invri2 = 1/math.sqr(ri[levelidx]); preparepartialquery(x0, kdboxmin, kdboxmax, nx, calcbuf, ref cnt, _params); partialqueryrec(kdnodes, kdsplits, cw, nx, ny, calcbuf, kdroots[levelidx], curradius2, x0, tmpr2, tmpoffs, ref cnt, _params); alglib.ap.assert(alglib.ap.len(tmpr2)>=cnt, "DesignMatrixRowSize: integrity failure (c)"); alglib.ap.assert(alglib.ap.len(tmpoffs)>=cnt, "DesignMatrixRowSize: integrity failure (d)"); alglib.ap.assert(alglib.ap.len(rowidx)>=rowsize+cnt, "DesignMatrixRowSize: integrity failure (e)"); alglib.ap.assert(alglib.ap.len(rowval)>=rowsperpoint*(rowsize+cnt), "DesignMatrixRowSize: integrity failure (f)"); for(j=0; j<=cnt-1; j++) { // // Generate element corresponding to fitting error. // Store derivative information which may be required later. 
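//
// The penalty entries below need the second derivative of the basis
// function with respect to X0[K]. With Arg = |X0-C|^2/Ri^2 and
// Val=f(Arg), DVal=f'(Arg), D2Val=f''(Arg) as returned by
// RBFV2BasisFuncDiff2(), the chain rule gives
//
//     d2 f(Arg) / dX0[K]^2 = DVal*2/Ri^2 + D2Val*(2*(X0[K]-C[K])/Ri^2)^2
//
// which is exactly the expression stored below, scaled by Penalty.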
// alglib.ap.assert((tmpoffs[j]-cwrange[level0])%(nx+ny)==0, "DesignMatrixRowSize: integrity failure (g)"); rbfv2basisfuncdiff2(bf, tmpr2[j]*invri2, ref val, ref dval, ref d2val, _params); rowidx[rowsize+j] = (tmpoffs[j]-cwrange[level0])/(nx+ny); rowval[(rowsize+j)*rowsperpoint+0] = val; if( rowsperpoint==1 ) { continue; } // // Generate elements corresponding to nonsmoothness penalty // alglib.ap.assert(rowsperpoint==1+nx, "DesignMatrixRowSize: integrity failure (h)"); for(k=0; k<=nx-1; k++) { rowval[(rowsize+j)*rowsperpoint+1+k] = penalty*(dval*2*invri2+d2val*math.sqr(2*(x0[k]-cw[tmpoffs[j]+k])*invri2)); } } // // Update columns counter // rowsize = rowsize+cnt; } } /************************************************************************* This function fills RBF model by zeros. -- ALGLIB -- Copyright 17.11.2018 by Bochkanov Sergey *************************************************************************/ private static void zerofill(rbfv2model s, int nx, int ny, int bf, alglib.xparams _params) { int i = 0; int j = 0; s.bf = bf; s.nh = 0; s.ri = new double[0]; s.s = new double[0]; s.kdroots = new int[0]; s.kdnodes = new int[0]; s.kdsplits = new double[0]; s.kdboxmin = new double[0]; s.kdboxmax = new double[0]; s.cw = new double[0]; s.v = new double[ny, nx+1]; for(i=0; i<=ny-1; i++) { for(j=0; j<=nx; j++) { s.v[i,j] = 0; } } } } public class spline2d { /************************************************************************* 2-dimensional spline inteprolant *************************************************************************/ public class spline2dinterpolant : apobject { public int stype; public int n; public int m; public int d; public double[] x; public double[] y; public double[] f; public spline2dinterpolant() { init(); } public override void init() { x = new double[0]; y = new double[0]; f = new double[0]; } public override alglib.apobject make_copy() { spline2dinterpolant _result = new spline2dinterpolant(); _result.stype = stype; _result.n = n; _result.m = m; _result.d = d; _result.x = (double[])x.Clone(); _result.y = (double[])y.Clone(); _result.f = (double[])f.Clone(); return _result; } }; /************************************************************************* Nonlinear least squares solver used to fit 2D splines to data *************************************************************************/ public class spline2dbuilder : apobject { public int priorterm; public double priortermval; public int areatype; public double xa; public double xb; public double ya; public double yb; public int gridtype; public int kx; public int ky; public double smoothing; public int nlayers; public int solvertype; public double lambdabase; public double[] xy; public int npoints; public int d; public double sx; public double sy; public bool adddegreeoffreedom; public int interfacesize; public int lsqrcnt; public int maxcoresize; public spline2dbuilder() { init(); } public override void init() { xy = new double[0]; } public override alglib.apobject make_copy() { spline2dbuilder _result = new spline2dbuilder(); _result.priorterm = priorterm; _result.priortermval = priortermval; _result.areatype = areatype; _result.xa = xa; _result.xb = xb; _result.ya = ya; _result.yb = yb; _result.gridtype = gridtype; _result.kx = kx; _result.ky = ky; _result.smoothing = smoothing; _result.nlayers = nlayers; _result.solvertype = solvertype; _result.lambdabase = lambdabase; _result.xy = (double[])xy.Clone(); _result.npoints = npoints; _result.d = d; _result.sx = sx; _result.sy = sy; _result.adddegreeoffreedom = 
adddegreeoffreedom; _result.interfacesize = interfacesize; _result.lsqrcnt = lsqrcnt; _result.maxcoresize = maxcoresize; return _result; } }; /************************************************************************* Spline 2D fitting report: rmserror RMS error avgerror average error maxerror maximum error r2 coefficient of determination, R-squared, 1-RSS/TSS *************************************************************************/ public class spline2dfitreport : apobject { public double rmserror; public double avgerror; public double maxerror; public double r2; public spline2dfitreport() { init(); } public override void init() { } public override alglib.apobject make_copy() { spline2dfitreport _result = new spline2dfitreport(); _result.rmserror = rmserror; _result.avgerror = avgerror; _result.maxerror = maxerror; _result.r2 = r2; return _result; } }; /************************************************************************* Design matrix stored in batch/block sparse format. The idea is that design matrix for bicubic spline fitting has very regular structure: 1. I-th row has non-zero entries in elements with indexes starting from some IDX, and including: IDX, IDX+1, IDX+2, IDX+3, IDX+KX+0, IDX+KX+1, and so on, up to 16 elements in total. Rows corresponding to dataset points have 16 non-zero elements, rows corresponding to nonlinearity penalty have 9 non-zero elements, and rows of regularizer have 1 element. For the sake of simplicity, we can use 16 elements for dataset rows and penalty rows, and process regularizer explicitly. 2. points located in the same cell of the grid have same pattern of non-zeros, so we can use dense Level 2 and Level 3 linear algebra to work with such matrices. *************************************************************************/ public class spline2dxdesignmatrix : apobject { public int blockwidth; public int kx; public int ky; public int npoints; public int nrows; public int ndenserows; public int ndensebatches; public int d; public int maxbatch; public double[,] vals; public int[] batches; public int[] batchbases; public double lambdareg; public double[] tmp0; public double[] tmp1; public double[,] tmp2; public spline2dxdesignmatrix() { init(); } public override void init() { vals = new double[0,0]; batches = new int[0]; batchbases = new int[0]; tmp0 = new double[0]; tmp1 = new double[0]; tmp2 = new double[0,0]; } public override alglib.apobject make_copy() { spline2dxdesignmatrix _result = new spline2dxdesignmatrix(); _result.blockwidth = blockwidth; _result.kx = kx; _result.ky = ky; _result.npoints = npoints; _result.nrows = nrows; _result.ndenserows = ndenserows; _result.ndensebatches = ndensebatches; _result.d = d; _result.maxbatch = maxbatch; _result.vals = (double[,])vals.Clone(); _result.batches = (int[])batches.Clone(); _result.batchbases = (int[])batchbases.Clone(); _result.lambdareg = lambdareg; _result.tmp0 = (double[])tmp0.Clone(); _result.tmp1 = (double[])tmp1.Clone(); _result.tmp2 = (double[,])tmp2.Clone(); return _result; } }; /************************************************************************* Temporaries for BlockLLS solver *************************************************************************/ public class spline2dblockllsbuf : apobject { public linlsqr.linlsqrstate solver; public linlsqr.linlsqrreport solverrep; public double[,] blockata; public double[,] trsmbuf2; public double[,] cholbuf2; public double[] cholbuf1; public double[] tmp0; public double[] tmp1; public spline2dblockllsbuf() { init(); } public override void init() 
{ solver = new linlsqr.linlsqrstate(); solverrep = new linlsqr.linlsqrreport(); blockata = new double[0,0]; trsmbuf2 = new double[0,0]; cholbuf2 = new double[0,0]; cholbuf1 = new double[0]; tmp0 = new double[0]; tmp1 = new double[0]; } public override alglib.apobject make_copy() { spline2dblockllsbuf _result = new spline2dblockllsbuf(); _result.solver = (linlsqr.linlsqrstate)solver.make_copy(); _result.solverrep = (linlsqr.linlsqrreport)solverrep.make_copy(); _result.blockata = (double[,])blockata.Clone(); _result.trsmbuf2 = (double[,])trsmbuf2.Clone(); _result.cholbuf2 = (double[,])cholbuf2.Clone(); _result.cholbuf1 = (double[])cholbuf1.Clone(); _result.tmp0 = (double[])tmp0.Clone(); _result.tmp1 = (double[])tmp1.Clone(); return _result; } }; /************************************************************************* Temporaries for FastDDM solver *************************************************************************/ public class spline2dfastddmbuf : apobject { public spline2dxdesignmatrix xdesignmatrix; public double[] tmp0; public double[] tmpz; public spline2dfitreport dummyrep; public spline2dinterpolant localmodel; public spline2dblockllsbuf blockllsbuf; public spline2dfastddmbuf() { init(); } public override void init() { xdesignmatrix = new spline2dxdesignmatrix(); tmp0 = new double[0]; tmpz = new double[0]; dummyrep = new spline2dfitreport(); localmodel = new spline2dinterpolant(); blockllsbuf = new spline2dblockllsbuf(); } public override alglib.apobject make_copy() { spline2dfastddmbuf _result = new spline2dfastddmbuf(); _result.xdesignmatrix = (spline2dxdesignmatrix)xdesignmatrix.make_copy(); _result.tmp0 = (double[])tmp0.Clone(); _result.tmpz = (double[])tmpz.Clone(); _result.dummyrep = (spline2dfitreport)dummyrep.make_copy(); _result.localmodel = (spline2dinterpolant)localmodel.make_copy(); _result.blockllsbuf = (spline2dblockllsbuf)blockllsbuf.make_copy(); return _result; } }; public const double cholreg = 1.0E-12; public const double lambdaregblocklls = 1.0E-6; public const double lambdaregfastddm = 1.0E-4; public const double lambdadecay = 0.5; /************************************************************************* This subroutine calculates the value of the bilinear or bicubic spline at the given point X. Input parameters: C - 2D spline object. Built by spline2dbuildbilinearv or spline2dbuildbicubicv. 
X, Y- point Result: S(x,y) -- ALGLIB PROJECT -- Copyright 05.07.2007 by Bochkanov Sergey *************************************************************************/ public static double spline2dcalc(spline2dinterpolant c, double x, double y, alglib.xparams _params) { double result = 0; int ix = 0; int iy = 0; int l = 0; int r = 0; int h = 0; double t = 0; double dt = 0; double u = 0; double du = 0; double y1 = 0; double y2 = 0; double y3 = 0; double y4 = 0; int s1 = 0; int s2 = 0; int s3 = 0; int s4 = 0; int sfx = 0; int sfy = 0; int sfxy = 0; double t2 = 0; double t3 = 0; double u2 = 0; double u3 = 0; double ht00 = 0; double ht01 = 0; double ht10 = 0; double ht11 = 0; double hu00 = 0; double hu01 = 0; double hu10 = 0; double hu11 = 0; alglib.ap.assert(c.stype==-1 || c.stype==-3, "Spline2DCalc: incorrect C (incorrect parameter C.SType)"); alglib.ap.assert(math.isfinite(x) && math.isfinite(y), "Spline2DCalc: X or Y contains NaN or Infinite value"); if( c.d!=1 ) { result = 0; return result; } // // Determine evaluation interval // l = 0; r = c.n-1; while( l!=r-1 ) { h = (l+r)/2; if( (double)(c.x[h])>=(double)(x) ) { r = h; } else { l = h; } } dt = 1.0/(c.x[l+1]-c.x[l]); t = (x-c.x[l])*dt; ix = l; l = 0; r = c.m-1; while( l!=r-1 ) { h = (l+r)/2; if( (double)(c.y[h])>=(double)(y) ) { r = h; } else { l = h; } } du = 1.0/(c.y[l+1]-c.y[l]); u = (y-c.y[l])*du; iy = l; // // Bilinear interpolation // if( c.stype==-1 ) { y1 = c.f[c.n*iy+ix]; y2 = c.f[c.n*iy+(ix+1)]; y3 = c.f[c.n*(iy+1)+(ix+1)]; y4 = c.f[c.n*(iy+1)+ix]; result = (1-t)*(1-u)*y1+t*(1-u)*y2+t*u*y3+(1-t)*u*y4; return result; } // // Bicubic interpolation: // * calculate Hermite basis for dimensions X and Y (variables T and U), // here HTij means basis function whose I-th derivative has value 1 at T=J. // Same for HUij. // * after initial calculation, apply scaling by DT/DU to the basis // * calculate using stored table of second derivatives // alglib.ap.assert(c.stype==-3, "Spline2DCalc: integrity check failed"); sfx = c.n*c.m; sfy = 2*c.n*c.m; sfxy = 3*c.n*c.m; s1 = c.n*iy+ix; s2 = c.n*iy+(ix+1); s3 = c.n*(iy+1)+ix; s4 = c.n*(iy+1)+(ix+1); t2 = t*t; t3 = t*t2; u2 = u*u; u3 = u*u2; ht00 = 2*t3-3*t2+1; ht10 = t3-2*t2+t; ht01 = -(2*t3)+3*t2; ht11 = t3-t2; hu00 = 2*u3-3*u2+1; hu10 = u3-2*u2+u; hu01 = -(2*u3)+3*u2; hu11 = u3-u2; ht10 = ht10/dt; ht11 = ht11/dt; hu10 = hu10/du; hu11 = hu11/du; result = 0; result = result+c.f[s1]*ht00*hu00+c.f[s2]*ht01*hu00+c.f[s3]*ht00*hu01+c.f[s4]*ht01*hu01; result = result+c.f[sfx+s1]*ht10*hu00+c.f[sfx+s2]*ht11*hu00+c.f[sfx+s3]*ht10*hu01+c.f[sfx+s4]*ht11*hu01; result = result+c.f[sfy+s1]*ht00*hu10+c.f[sfy+s2]*ht01*hu10+c.f[sfy+s3]*ht00*hu11+c.f[sfy+s4]*ht01*hu11; result = result+c.f[sfxy+s1]*ht10*hu10+c.f[sfxy+s2]*ht11*hu10+c.f[sfxy+s3]*ht10*hu11+c.f[sfxy+s4]*ht11*hu11; return result; } /************************************************************************* This subroutine calculates the value of the bilinear or bicubic spline at the given point X and its derivatives. Input parameters: C - spline interpolant. 
X, Y- point Output parameters: F - S(x,y) FX - dS(x,y)/dX FY - dS(x,y)/dY FXY - d2S(x,y)/dXdY -- ALGLIB PROJECT -- Copyright 05.07.2007 by Bochkanov Sergey *************************************************************************/ public static void spline2ddiff(spline2dinterpolant c, double x, double y, ref double f, ref double fx, ref double fy, ref double fxy, alglib.xparams _params) { double t = 0; double dt = 0; double u = 0; double du = 0; int ix = 0; int iy = 0; int l = 0; int r = 0; int h = 0; int s1 = 0; int s2 = 0; int s3 = 0; int s4 = 0; int sfx = 0; int sfy = 0; int sfxy = 0; double y1 = 0; double y2 = 0; double y3 = 0; double y4 = 0; double v0 = 0; double v1 = 0; double v2 = 0; double v3 = 0; double t2 = 0; double t3 = 0; double u2 = 0; double u3 = 0; double ht00 = 0; double ht01 = 0; double ht10 = 0; double ht11 = 0; double hu00 = 0; double hu01 = 0; double hu10 = 0; double hu11 = 0; double dht00 = 0; double dht01 = 0; double dht10 = 0; double dht11 = 0; double dhu00 = 0; double dhu01 = 0; double dhu10 = 0; double dhu11 = 0; f = 0; fx = 0; fy = 0; fxy = 0; alglib.ap.assert(c.stype==-1 || c.stype==-3, "Spline2DDiff: incorrect C (incorrect parameter C.SType)"); alglib.ap.assert(math.isfinite(x) && math.isfinite(y), "Spline2DDiff: X or Y contains NaN or Infinite value"); // // Prepare F, dF/dX, dF/dY, d2F/dXdY // f = 0; fx = 0; fy = 0; fxy = 0; if( c.d!=1 ) { return; } // // Binary search in the [ x[0], ..., x[n-2] ] (x[n-1] is not included) // l = 0; r = c.n-1; while( l!=r-1 ) { h = (l+r)/2; if( (double)(c.x[h])>=(double)(x) ) { r = h; } else { l = h; } } t = (x-c.x[l])/(c.x[l+1]-c.x[l]); dt = 1.0/(c.x[l+1]-c.x[l]); ix = l; // // Binary search in the [ y[0], ..., y[m-2] ] (y[m-1] is not included) // l = 0; r = c.m-1; while( l!=r-1 ) { h = (l+r)/2; if( (double)(c.y[h])>=(double)(y) ) { r = h; } else { l = h; } } u = (y-c.y[l])/(c.y[l+1]-c.y[l]); du = 1.0/(c.y[l+1]-c.y[l]); iy = l; // // Bilinear interpolation // if( c.stype==-1 ) { y1 = c.f[c.n*iy+ix]; y2 = c.f[c.n*iy+(ix+1)]; y3 = c.f[c.n*(iy+1)+(ix+1)]; y4 = c.f[c.n*(iy+1)+ix]; f = (1-t)*(1-u)*y1+t*(1-u)*y2+t*u*y3+(1-t)*u*y4; fx = (-((1-u)*y1)+(1-u)*y2+u*y3-u*y4)*dt; fy = (-((1-t)*y1)-t*y2+t*y3+(1-t)*y4)*du; fxy = (y1-y2+y3-y4)*du*dt; return; } // // Bicubic interpolation // if( c.stype==-3 ) { sfx = c.n*c.m; sfy = 2*c.n*c.m; sfxy = 3*c.n*c.m; s1 = c.n*iy+ix; s2 = c.n*iy+(ix+1); s3 = c.n*(iy+1)+ix; s4 = c.n*(iy+1)+(ix+1); t2 = t*t; t3 = t*t2; u2 = u*u; u3 = u*u2; ht00 = 2*t3-3*t2+1; ht10 = t3-2*t2+t; ht01 = -(2*t3)+3*t2; ht11 = t3-t2; hu00 = 2*u3-3*u2+1; hu10 = u3-2*u2+u; hu01 = -(2*u3)+3*u2; hu11 = u3-u2; ht10 = ht10/dt; ht11 = ht11/dt; hu10 = hu10/du; hu11 = hu11/du; dht00 = 6*t2-6*t; dht10 = 3*t2-4*t+1; dht01 = -(6*t2)+6*t; dht11 = 3*t2-2*t; dhu00 = 6*u2-6*u; dhu10 = 3*u2-4*u+1; dhu01 = -(6*u2)+6*u; dhu11 = 3*u2-2*u; dht00 = dht00*dt; dht01 = dht01*dt; dhu00 = dhu00*du; dhu01 = dhu01*du; f = 0; fx = 0; fy = 0; fxy = 0; v0 = c.f[s1]; v1 = c.f[s2]; v2 = c.f[s3]; v3 = c.f[s4]; f = f+v0*ht00*hu00+v1*ht01*hu00+v2*ht00*hu01+v3*ht01*hu01; fx = fx+v0*dht00*hu00+v1*dht01*hu00+v2*dht00*hu01+v3*dht01*hu01; fy = fy+v0*ht00*dhu00+v1*ht01*dhu00+v2*ht00*dhu01+v3*ht01*dhu01; fxy = fxy+v0*dht00*dhu00+v1*dht01*dhu00+v2*dht00*dhu01+v3*dht01*dhu01; v0 = c.f[sfx+s1]; v1 = c.f[sfx+s2]; v2 = c.f[sfx+s3]; v3 = c.f[sfx+s4]; f = f+v0*ht10*hu00+v1*ht11*hu00+v2*ht10*hu01+v3*ht11*hu01; fx = fx+v0*dht10*hu00+v1*dht11*hu00+v2*dht10*hu01+v3*dht11*hu01; fy = fy+v0*ht10*dhu00+v1*ht11*dhu00+v2*ht10*dhu01+v3*ht11*dhu01; fxy = 
fxy+v0*dht10*dhu00+v1*dht11*dhu00+v2*dht10*dhu01+v3*dht11*dhu01; v0 = c.f[sfy+s1]; v1 = c.f[sfy+s2]; v2 = c.f[sfy+s3]; v3 = c.f[sfy+s4]; f = f+v0*ht00*hu10+v1*ht01*hu10+v2*ht00*hu11+v3*ht01*hu11; fx = fx+v0*dht00*hu10+v1*dht01*hu10+v2*dht00*hu11+v3*dht01*hu11; fy = fy+v0*ht00*dhu10+v1*ht01*dhu10+v2*ht00*dhu11+v3*ht01*dhu11; fxy = fxy+v0*dht00*dhu10+v1*dht01*dhu10+v2*dht00*dhu11+v3*dht01*dhu11; v0 = c.f[sfxy+s1]; v1 = c.f[sfxy+s2]; v2 = c.f[sfxy+s3]; v3 = c.f[sfxy+s4]; f = f+v0*ht10*hu10+v1*ht11*hu10+v2*ht10*hu11+v3*ht11*hu11; fx = fx+v0*dht10*hu10+v1*dht11*hu10+v2*dht10*hu11+v3*dht11*hu11; fy = fy+v0*ht10*dhu10+v1*ht11*dhu10+v2*ht10*dhu11+v3*ht11*dhu11; fxy = fxy+v0*dht10*dhu10+v1*dht11*dhu10+v2*dht10*dhu11+v3*dht11*dhu11; return; } } /************************************************************************* This subroutine calculates bilinear or bicubic vector-valued spline at the given point (X,Y). If you need just some specific component of vector-valued spline, you can use spline2dcalcvi() function. INPUT PARAMETERS: C - spline interpolant. X, Y- point F - output buffer, possibly preallocated array. In case array size is large enough to store result, it is not reallocated. Array which is too short will be reallocated OUTPUT PARAMETERS: F - array[D] (or larger) which stores function values -- ALGLIB PROJECT -- Copyright 01.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dcalcvbuf(spline2dinterpolant c, double x, double y, ref double[] f, alglib.xparams _params) { int ix = 0; int iy = 0; int l = 0; int r = 0; int h = 0; int i = 0; double t = 0; double dt = 0; double u = 0; double du = 0; double y1 = 0; double y2 = 0; double y3 = 0; double y4 = 0; int s1 = 0; int s2 = 0; int s3 = 0; int s4 = 0; int sfx = 0; int sfy = 0; int sfxy = 0; double t2 = 0; double t3 = 0; double u2 = 0; double u3 = 0; double ht00 = 0; double ht01 = 0; double ht10 = 0; double ht11 = 0; double hu00 = 0; double hu01 = 0; double hu10 = 0; double hu11 = 0; alglib.ap.assert(c.stype==-1 || c.stype==-3, "Spline2DCalcVBuf: incorrect C (incorrect parameter C.SType)"); alglib.ap.assert(math.isfinite(x) && math.isfinite(y), "Spline2DCalcVBuf: X or Y contains NaN or Infinite value"); // // Allocate place for output // apserv.rvectorsetlengthatleast(ref f, c.d, _params); // // Determine evaluation interval // l = 0; r = c.n-1; while( l!=r-1 ) { h = (l+r)/2; if( (double)(c.x[h])>=(double)(x) ) { r = h; } else { l = h; } } dt = 1.0/(c.x[l+1]-c.x[l]); t = (x-c.x[l])*dt; ix = l; l = 0; r = c.m-1; while( l!=r-1 ) { h = (l+r)/2; if( (double)(c.y[h])>=(double)(y) ) { r = h; } else { l = h; } } du = 1.0/(c.y[l+1]-c.y[l]); u = (y-c.y[l])*du; iy = l; // // Bilinear interpolation // if( c.stype==-1 ) { for(i=0; i<=c.d-1; i++) { y1 = c.f[c.d*(c.n*iy+ix)+i]; y2 = c.f[c.d*(c.n*iy+(ix+1))+i]; y3 = c.f[c.d*(c.n*(iy+1)+(ix+1))+i]; y4 = c.f[c.d*(c.n*(iy+1)+ix)+i]; f[i] = (1-t)*(1-u)*y1+t*(1-u)*y2+t*u*y3+(1-t)*u*y4; } return; } // // Bicubic interpolation: // * calculate Hermite basis for dimensions X and Y (variables T and U), // here HTij means basis function whose I-th derivative has value 1 at T=J. // Same for HUij. 
// * after initial calculation, apply scaling by DT/DU to the basis // * calculate using stored table of second derivatives // alglib.ap.assert(c.stype==-3, "Spline2DCalc: integrity check failed"); sfx = c.n*c.m*c.d; sfy = 2*c.n*c.m*c.d; sfxy = 3*c.n*c.m*c.d; s1 = (c.n*iy+ix)*c.d; s2 = (c.n*iy+(ix+1))*c.d; s3 = (c.n*(iy+1)+ix)*c.d; s4 = (c.n*(iy+1)+(ix+1))*c.d; t2 = t*t; t3 = t*t2; u2 = u*u; u3 = u*u2; ht00 = 2*t3-3*t2+1; ht10 = t3-2*t2+t; ht01 = -(2*t3)+3*t2; ht11 = t3-t2; hu00 = 2*u3-3*u2+1; hu10 = u3-2*u2+u; hu01 = -(2*u3)+3*u2; hu11 = u3-u2; ht10 = ht10/dt; ht11 = ht11/dt; hu10 = hu10/du; hu11 = hu11/du; for(i=0; i<=c.d-1; i++) { // // Calculate I-th component // f[i] = 0; f[i] = f[i]+c.f[s1]*ht00*hu00+c.f[s2]*ht01*hu00+c.f[s3]*ht00*hu01+c.f[s4]*ht01*hu01; f[i] = f[i]+c.f[sfx+s1]*ht10*hu00+c.f[sfx+s2]*ht11*hu00+c.f[sfx+s3]*ht10*hu01+c.f[sfx+s4]*ht11*hu01; f[i] = f[i]+c.f[sfy+s1]*ht00*hu10+c.f[sfy+s2]*ht01*hu10+c.f[sfy+s3]*ht00*hu11+c.f[sfy+s4]*ht01*hu11; f[i] = f[i]+c.f[sfxy+s1]*ht10*hu10+c.f[sfxy+s2]*ht11*hu10+c.f[sfxy+s3]*ht10*hu11+c.f[sfxy+s4]*ht11*hu11; // // Advance source indexes // s1 = s1+1; s2 = s2+1; s3 = s3+1; s4 = s4+1; } } /************************************************************************* This subroutine calculates specific component of vector-valued bilinear or bicubic spline at the given point (X,Y). INPUT PARAMETERS: C - spline interpolant. X, Y- point I - component index, in [0,D). An exception is generated for out of range values. RESULT: value of I-th component -- ALGLIB PROJECT -- Copyright 01.02.2018 by Bochkanov Sergey *************************************************************************/ public static double spline2dcalcvi(spline2dinterpolant c, double x, double y, int i, alglib.xparams _params) { double result = 0; int ix = 0; int iy = 0; int l = 0; int r = 0; int h = 0; double t = 0; double dt = 0; double u = 0; double du = 0; double y1 = 0; double y2 = 0; double y3 = 0; double y4 = 0; int s1 = 0; int s2 = 0; int s3 = 0; int s4 = 0; int sfx = 0; int sfy = 0; int sfxy = 0; double t2 = 0; double t3 = 0; double u2 = 0; double u3 = 0; double ht00 = 0; double ht01 = 0; double ht10 = 0; double ht11 = 0; double hu00 = 0; double hu01 = 0; double hu10 = 0; double hu11 = 0; alglib.ap.assert(c.stype==-1 || c.stype==-3, "Spline2DCalcVi: incorrect C (incorrect parameter C.SType)"); alglib.ap.assert(math.isfinite(x) && math.isfinite(y), "Spline2DCalcVi: X or Y contains NaN or Infinite value"); alglib.ap.assert(i>=0 && i=D)"); // // Determine evaluation interval // l = 0; r = c.n-1; while( l!=r-1 ) { h = (l+r)/2; if( (double)(c.x[h])>=(double)(x) ) { r = h; } else { l = h; } } dt = 1.0/(c.x[l+1]-c.x[l]); t = (x-c.x[l])*dt; ix = l; l = 0; r = c.m-1; while( l!=r-1 ) { h = (l+r)/2; if( (double)(c.y[h])>=(double)(y) ) { r = h; } else { l = h; } } du = 1.0/(c.y[l+1]-c.y[l]); u = (y-c.y[l])*du; iy = l; // // Bilinear interpolation // if( c.stype==-1 ) { y1 = c.f[c.d*(c.n*iy+ix)+i]; y2 = c.f[c.d*(c.n*iy+(ix+1))+i]; y3 = c.f[c.d*(c.n*(iy+1)+(ix+1))+i]; y4 = c.f[c.d*(c.n*(iy+1)+ix)+i]; result = (1-t)*(1-u)*y1+t*(1-u)*y2+t*u*y3+(1-t)*u*y4; return result; } // // Bicubic interpolation: // * calculate Hermite basis for dimensions X and Y (variables T and U), // here HTij means basis function whose I-th derivative has value 1 at T=J. // Same for HUij. 
// * after initial calculation, apply scaling by DT/DU to the basis // * calculate using stored table of second derivatives // alglib.ap.assert(c.stype==-3, "Spline2DCalc: integrity check failed"); sfx = c.n*c.m*c.d; sfy = 2*c.n*c.m*c.d; sfxy = 3*c.n*c.m*c.d; s1 = (c.n*iy+ix)*c.d; s2 = (c.n*iy+(ix+1))*c.d; s3 = (c.n*(iy+1)+ix)*c.d; s4 = (c.n*(iy+1)+(ix+1))*c.d; t2 = t*t; t3 = t*t2; u2 = u*u; u3 = u*u2; ht00 = 2*t3-3*t2+1; ht10 = t3-2*t2+t; ht01 = -(2*t3)+3*t2; ht11 = t3-t2; hu00 = 2*u3-3*u2+1; hu10 = u3-2*u2+u; hu01 = -(2*u3)+3*u2; hu11 = u3-u2; ht10 = ht10/dt; ht11 = ht11/dt; hu10 = hu10/du; hu11 = hu11/du; // // Advance source indexes to I-th position // s1 = s1+i; s2 = s2+i; s3 = s3+i; s4 = s4+i; // // Calculate I-th component // result = 0; result = result+c.f[s1]*ht00*hu00+c.f[s2]*ht01*hu00+c.f[s3]*ht00*hu01+c.f[s4]*ht01*hu01; result = result+c.f[sfx+s1]*ht10*hu00+c.f[sfx+s2]*ht11*hu00+c.f[sfx+s3]*ht10*hu01+c.f[sfx+s4]*ht11*hu01; result = result+c.f[sfy+s1]*ht00*hu10+c.f[sfy+s2]*ht01*hu10+c.f[sfy+s3]*ht00*hu11+c.f[sfy+s4]*ht01*hu11; result = result+c.f[sfxy+s1]*ht10*hu10+c.f[sfxy+s2]*ht11*hu10+c.f[sfxy+s3]*ht10*hu11+c.f[sfxy+s4]*ht11*hu11; return result; } /************************************************************************* This subroutine calculates bilinear or bicubic vector-valued spline at the given point (X,Y). INPUT PARAMETERS: C - spline interpolant. X, Y- point OUTPUT PARAMETERS: F - array[D] which stores function values. F is out-parameter and it is reallocated after call to this function. In case you want to reuse previously allocated F, you may use Spline2DCalcVBuf(), which reallocates F only when it is too small. -- ALGLIB PROJECT -- Copyright 16.04.2012 by Bochkanov Sergey *************************************************************************/ public static void spline2dcalcv(spline2dinterpolant c, double x, double y, ref double[] f, alglib.xparams _params) { f = new double[0]; alglib.ap.assert(c.stype==-1 || c.stype==-3, "Spline2DCalcV: incorrect C (incorrect parameter C.SType)"); alglib.ap.assert(math.isfinite(x) && math.isfinite(y), "Spline2DCalcV: either X=NaN/Infinite or Y=NaN/Infinite"); spline2dcalcvbuf(c, x, y, ref f, _params); } /************************************************************************* This subroutine calculates value of specific component of bilinear or bicubic vector-valued spline and its derivatives. Input parameters: C - spline interpolant. 
X, Y- point I - component index, in [0,D) Output parameters: F - S(x,y) FX - dS(x,y)/dX FY - dS(x,y)/dY FXY - d2S(x,y)/dXdY -- ALGLIB PROJECT -- Copyright 05.07.2007 by Bochkanov Sergey *************************************************************************/ public static void spline2ddiffvi(spline2dinterpolant c, double x, double y, int i, ref double f, ref double fx, ref double fy, ref double fxy, alglib.xparams _params) { int d = 0; double t = 0; double dt = 0; double u = 0; double du = 0; int ix = 0; int iy = 0; int l = 0; int r = 0; int h = 0; int s1 = 0; int s2 = 0; int s3 = 0; int s4 = 0; int sfx = 0; int sfy = 0; int sfxy = 0; double y1 = 0; double y2 = 0; double y3 = 0; double y4 = 0; double v0 = 0; double v1 = 0; double v2 = 0; double v3 = 0; double t2 = 0; double t3 = 0; double u2 = 0; double u3 = 0; double ht00 = 0; double ht01 = 0; double ht10 = 0; double ht11 = 0; double hu00 = 0; double hu01 = 0; double hu10 = 0; double hu11 = 0; double dht00 = 0; double dht01 = 0; double dht10 = 0; double dht11 = 0; double dhu00 = 0; double dhu01 = 0; double dhu10 = 0; double dhu11 = 0; f = 0; fx = 0; fy = 0; fxy = 0; alglib.ap.assert(c.stype==-1 || c.stype==-3, "Spline2DDiffVI: incorrect C (incorrect parameter C.SType)"); alglib.ap.assert(math.isfinite(x) && math.isfinite(y), "Spline2DDiffVI: X or Y contains NaN or Infinite value"); alglib.ap.assert(i>=0 && i<c.d, "Spline2DDiffVI: I<0 or I>=D"); // // Prepare F, dF/dX, dF/dY, d2F/dXdY // f = 0; fx = 0; fy = 0; fxy = 0; d = c.d; // // Binary search in the [ x[0], ..., x[n-2] ] (x[n-1] is not included) // l = 0; r = c.n-1; while( l!=r-1 ) { h = (l+r)/2; if( (double)(c.x[h])>=(double)(x) ) { r = h; } else { l = h; } } t = (x-c.x[l])/(c.x[l+1]-c.x[l]); dt = 1.0/(c.x[l+1]-c.x[l]); ix = l; // // Binary search in the [ y[0], ..., y[m-2] ] (y[m-1] is not included) // l = 0; r = c.m-1; while( l!=r-1 ) { h = (l+r)/2; if( (double)(c.y[h])>=(double)(y) ) { r = h; } else { l = h; } } u = (y-c.y[l])/(c.y[l+1]-c.y[l]); du = 1.0/(c.y[l+1]-c.y[l]); iy = l; // // Bilinear interpolation // if( c.stype==-1 ) { y1 = c.f[d*(c.n*iy+ix)+i]; y2 = c.f[d*(c.n*iy+(ix+1))+i]; y3 = c.f[d*(c.n*(iy+1)+(ix+1))+i]; y4 = c.f[d*(c.n*(iy+1)+ix)+i]; f = (1-t)*(1-u)*y1+t*(1-u)*y2+t*u*y3+(1-t)*u*y4; fx = (-((1-u)*y1)+(1-u)*y2+u*y3-u*y4)*dt; fy = (-((1-t)*y1)-t*y2+t*y3+(1-t)*y4)*du; fxy = (y1-y2+y3-y4)*du*dt; return; } // // Bicubic interpolation // if( c.stype==-3 ) { sfx = c.n*c.m*d; sfy = 2*c.n*c.m*d; sfxy = 3*c.n*c.m*d; s1 = d*(c.n*iy+ix)+i; s2 = d*(c.n*iy+(ix+1))+i; s3 = d*(c.n*(iy+1)+ix)+i; s4 = d*(c.n*(iy+1)+(ix+1))+i; t2 = t*t; t3 = t*t2; u2 = u*u; u3 = u*u2; ht00 = 2*t3-3*t2+1; ht10 = t3-2*t2+t; ht01 = -(2*t3)+3*t2; ht11 = t3-t2; hu00 = 2*u3-3*u2+1; hu10 = u3-2*u2+u; hu01 = -(2*u3)+3*u2; hu11 = u3-u2; ht10 = ht10/dt; ht11 = ht11/dt; hu10 = hu10/du; hu11 = hu11/du; dht00 = 6*t2-6*t; dht10 = 3*t2-4*t+1; dht01 = -(6*t2)+6*t; dht11 = 3*t2-2*t; dhu00 = 6*u2-6*u; dhu10 = 3*u2-4*u+1; dhu01 = -(6*u2)+6*u; dhu11 = 3*u2-2*u; dht00 = dht00*dt; dht01 = dht01*dt; dhu00 = dhu00*du; dhu01 = dhu01*du; f = 0; fx = 0; fy = 0; fxy = 0; v0 = c.f[s1]; v1 = c.f[s2]; v2 = c.f[s3]; v3 = c.f[s4]; f = f+v0*ht00*hu00+v1*ht01*hu00+v2*ht00*hu01+v3*ht01*hu01; fx = fx+v0*dht00*hu00+v1*dht01*hu00+v2*dht00*hu01+v3*dht01*hu01; fy = fy+v0*ht00*dhu00+v1*ht01*dhu00+v2*ht00*dhu01+v3*ht01*dhu01; fxy = fxy+v0*dht00*dhu00+v1*dht01*dhu00+v2*dht00*dhu01+v3*dht01*dhu01; v0 = c.f[sfx+s1]; v1 = c.f[sfx+s2]; v2 = c.f[sfx+s3]; v3 = c.f[sfx+s4]; f = f+v0*ht10*hu00+v1*ht11*hu00+v2*ht10*hu01+v3*ht11*hu01; fx = 
fx+v0*dht10*hu00+v1*dht11*hu00+v2*dht10*hu01+v3*dht11*hu01; fy = fy+v0*ht10*dhu00+v1*ht11*dhu00+v2*ht10*dhu01+v3*ht11*dhu01; fxy = fxy+v0*dht10*dhu00+v1*dht11*dhu00+v2*dht10*dhu01+v3*dht11*dhu01; v0 = c.f[sfy+s1]; v1 = c.f[sfy+s2]; v2 = c.f[sfy+s3]; v3 = c.f[sfy+s4]; f = f+v0*ht00*hu10+v1*ht01*hu10+v2*ht00*hu11+v3*ht01*hu11; fx = fx+v0*dht00*hu10+v1*dht01*hu10+v2*dht00*hu11+v3*dht01*hu11; fy = fy+v0*ht00*dhu10+v1*ht01*dhu10+v2*ht00*dhu11+v3*ht01*dhu11; fxy = fxy+v0*dht00*dhu10+v1*dht01*dhu10+v2*dht00*dhu11+v3*dht01*dhu11; v0 = c.f[sfxy+s1]; v1 = c.f[sfxy+s2]; v2 = c.f[sfxy+s3]; v3 = c.f[sfxy+s4]; f = f+v0*ht10*hu10+v1*ht11*hu10+v2*ht10*hu11+v3*ht11*hu11; fx = fx+v0*dht10*hu10+v1*dht11*hu10+v2*dht10*hu11+v3*dht11*hu11; fy = fy+v0*ht10*dhu10+v1*ht11*dhu10+v2*ht10*dhu11+v3*ht11*dhu11; fxy = fxy+v0*dht10*dhu10+v1*dht11*dhu10+v2*dht10*dhu11+v3*dht11*dhu11; return; } } /************************************************************************* This subroutine performs linear transformation of the spline argument. Input parameters: C - spline interpolant AX, BX - transformation coefficients: x = A*t + B AY, BY - transformation coefficients: y = A*u + B Result: C - transformed spline -- ALGLIB PROJECT -- Copyright 30.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline2dlintransxy(spline2dinterpolant c, double ax, double bx, double ay, double by, alglib.xparams _params) { double[] x = new double[0]; double[] y = new double[0]; double[] f = new double[0]; double[] v = new double[0]; int i = 0; int j = 0; int k = 0; alglib.ap.assert(c.stype==-3 || c.stype==-1, "Spline2DLinTransXY: incorrect C (incorrect parameter C.SType)"); alglib.ap.assert(math.isfinite(ax), "Spline2DLinTransXY: AX is infinite or NaN"); alglib.ap.assert(math.isfinite(bx), "Spline2DLinTransXY: BX is infinite or NaN"); alglib.ap.assert(math.isfinite(ay), "Spline2DLinTransXY: AY is infinite or NaN"); alglib.ap.assert(math.isfinite(by), "Spline2DLinTransXY: BY is infinite or NaN"); x = new double[c.n]; y = new double[c.m]; f = new double[c.m*c.n*c.d]; for(j=0; j<=c.n-1; j++) { x[j] = c.x[j]; } for(i=0; i<=c.m-1; i++) { y[i] = c.y[i]; } for(i=0; i<=c.m-1; i++) { for(j=0; j<=c.n-1; j++) { for(k=0; k<=c.d-1; k++) { f[c.d*(i*c.n+j)+k] = c.f[c.d*(i*c.n+j)+k]; } } } // // Handle different combinations of AX/AY // if( (double)(ax)==(double)(0) && (double)(ay)!=(double)(0) ) { for(i=0; i<=c.m-1; i++) { spline2dcalcvbuf(c, bx, y[i], ref v, _params); y[i] = (y[i]-by)/ay; for(j=0; j<=c.n-1; j++) { for(k=0; k<=c.d-1; k++) { f[c.d*(i*c.n+j)+k] = v[k]; } } } } if( (double)(ax)!=(double)(0) && (double)(ay)==(double)(0) ) { for(j=0; j<=c.n-1; j++) { spline2dcalcvbuf(c, x[j], by, ref v, _params); x[j] = (x[j]-bx)/ax; for(i=0; i<=c.m-1; i++) { for(k=0; k<=c.d-1; k++) { f[c.d*(i*c.n+j)+k] = v[k]; } } } } if( (double)(ax)!=(double)(0) && (double)(ay)!=(double)(0) ) { for(j=0; j<=c.n-1; j++) { x[j] = (x[j]-bx)/ax; } for(i=0; i<=c.m-1; i++) { y[i] = (y[i]-by)/ay; } } if( (double)(ax)==(double)(0) && (double)(ay)==(double)(0) ) { spline2dcalcvbuf(c, bx, by, ref v, _params); for(i=0; i<=c.m-1; i++) { for(j=0; j<=c.n-1; j++) { for(k=0; k<=c.d-1; k++) { f[c.d*(i*c.n+j)+k] = v[k]; } } } } // // Rebuild spline // if( c.stype==-3 ) { spline2dbuildbicubicv(x, c.n, y, c.m, f, c.d, c, _params); } if( c.stype==-1 ) { spline2dbuildbilinearv(x, c.n, y, c.m, f, c.d, c, _params); } } /************************************************************************* This subroutine performs linear 
transformation of the spline. Input parameters: C - spline interpolant. A, B- transformation coefficients: S2(x,y) = A*S(x,y) + B Output parameters: C - transformed spline -- ALGLIB PROJECT -- Copyright 30.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline2dlintransf(spline2dinterpolant c, double a, double b, alglib.xparams _params) { double[] x = new double[0]; double[] y = new double[0]; double[] f = new double[0]; int i = 0; int j = 0; alglib.ap.assert(c.stype==-3 || c.stype==-1, "Spline2DLinTransF: incorrect C (incorrect parameter C.SType)"); x = new double[c.n]; y = new double[c.m]; f = new double[c.m*c.n*c.d]; for(j=0; j<=c.n-1; j++) { x[j] = c.x[j]; } for(i=0; i<=c.m-1; i++) { y[i] = c.y[i]; } for(i=0; i<=c.m*c.n*c.d-1; i++) { f[i] = a*c.f[i]+b; } if( c.stype==-3 ) { spline2dbuildbicubicv(x, c.n, y, c.m, f, c.d, c, _params); } if( c.stype==-1 ) { spline2dbuildbilinearv(x, c.n, y, c.m, f, c.d, c, _params); } } /************************************************************************* This subroutine makes the copy of the spline model. Input parameters: C - spline interpolant Output parameters: CC - spline copy -- ALGLIB PROJECT -- Copyright 29.06.2007 by Bochkanov Sergey *************************************************************************/ public static void spline2dcopy(spline2dinterpolant c, spline2dinterpolant cc, alglib.xparams _params) { int tblsize = 0; int i_ = 0; alglib.ap.assert(c.stype==-1 || c.stype==-3, "Spline2DCopy: incorrect C (incorrect parameter C.SType)"); cc.n = c.n; cc.m = c.m; cc.d = c.d; cc.stype = c.stype; tblsize = -1; if( c.stype==-3 ) { tblsize = 4*c.n*c.m*c.d; } if( c.stype==-1 ) { tblsize = c.n*c.m*c.d; } alglib.ap.assert(tblsize>0, "Spline2DCopy: internal error"); cc.x = new double[cc.n]; cc.y = new double[cc.m]; cc.f = new double[tblsize]; for(i_=0; i_<=cc.n-1;i_++) { cc.x[i_] = c.x[i_]; } for(i_=0; i_<=cc.m-1;i_++) { cc.y[i_] = c.y[i_]; } for(i_=0; i_<=tblsize-1;i_++) { cc.f[i_] = c.f[i_]; } } /************************************************************************* Bicubic spline resampling Input parameters: A - function values at the old grid, array[0..OldHeight-1, 0..OldWidth-1] OldHeight - old grid height, OldHeight>1 OldWidth - old grid width, OldWidth>1 NewHeight - new grid height, NewHeight>1 NewWidth - new grid width, NewWidth>1 Output parameters: B - function values at the new grid, array[0..NewHeight-1, 0..NewWidth-1] -- ALGLIB routine -- 15 May, 2007 Copyright by Bochkanov Sergey *************************************************************************/ public static void spline2dresamplebicubic(double[,] a, int oldheight, int oldwidth, ref double[,] b, int newheight, int newwidth, alglib.xparams _params) { double[,] buf = new double[0,0]; double[] x = new double[0]; double[] y = new double[0]; spline1d.spline1dinterpolant c = new spline1d.spline1dinterpolant(); int mw = 0; int mh = 0; int i = 0; int j = 0; b = new double[0,0]; alglib.ap.assert(oldwidth>1 && oldheight>1, "Spline2DResampleBicubic: width/height less than 1"); alglib.ap.assert(newwidth>1 && newheight>1, "Spline2DResampleBicubic: width/height less than 1"); // // Prepare // mw = Math.Max(oldwidth, newwidth); mh = Math.Max(oldheight, newheight); b = new double[newheight, newwidth]; buf = new double[oldheight, newwidth]; x = new double[Math.Max(mw, mh)]; y = new double[Math.Max(mw, mh)]; // // Horizontal interpolation // for(i=0; i<=oldheight-1; i++) { // // Fill X, Y // for(j=0; j<=oldwidth-1; j++) 
{ x[j] = (double)j/(double)(oldwidth-1); y[j] = a[i,j]; } // // Interpolate and place result into temporary matrix // spline1d.spline1dbuildcubic(x, y, oldwidth, 0, 0.0, 0, 0.0, c, _params); for(j=0; j<=newwidth-1; j++) { buf[i,j] = spline1d.spline1dcalc(c, (double)j/(double)(newwidth-1), _params); } } // // Vertical interpolation // for(j=0; j<=newwidth-1; j++) { // // Fill X, Y // for(i=0; i<=oldheight-1; i++) { x[i] = (double)i/(double)(oldheight-1); y[i] = buf[i,j]; } // // Interpolate and place result into B // spline1d.spline1dbuildcubic(x, y, oldheight, 0, 0.0, 0, 0.0, c, _params); for(i=0; i<=newheight-1; i++) { b[i,j] = spline1d.spline1dcalc(c, (double)i/(double)(newheight-1), _params); } } } /************************************************************************* Bilinear spline resampling Input parameters: A - function values at the old grid, array[0..OldHeight-1, 0..OldWidth-1] OldHeight - old grid height, OldHeight>1 OldWidth - old grid width, OldWidth>1 NewHeight - new grid height, NewHeight>1 NewWidth - new grid width, NewWidth>1 Output parameters: B - function values at the new grid, array[0..NewHeight-1, 0..NewWidth-1] -- ALGLIB routine -- 09.07.2007 Copyright by Bochkanov Sergey *************************************************************************/ public static void spline2dresamplebilinear(double[,] a, int oldheight, int oldwidth, ref double[,] b, int newheight, int newwidth, alglib.xparams _params) { int l = 0; int c = 0; double t = 0; double u = 0; int i = 0; int j = 0; b = new double[0,0]; alglib.ap.assert(oldwidth>1 && oldheight>1, "Spline2DResampleBilinear: width/height less than 1"); alglib.ap.assert(newwidth>1 && newheight>1, "Spline2DResampleBilinear: width/height less than 1"); b = new double[newheight, newwidth]; for(i=0; i<=newheight-1; i++) { for(j=0; j<=newwidth-1; j++) { l = i*(oldheight-1)/(newheight-1); if( l==oldheight-1 ) { l = oldheight-2; } u = (double)i/(double)(newheight-1)*(oldheight-1)-l; c = j*(oldwidth-1)/(newwidth-1); if( c==oldwidth-1 ) { c = oldwidth-2; } t = (double)(j*(oldwidth-1))/(double)(newwidth-1)-c; b[i,j] = (1-t)*(1-u)*a[l,c]+t*(1-u)*a[l,c+1]+t*u*a[l+1,c+1]+(1-t)*u*a[l+1,c]; } } } /************************************************************************* This subroutine builds bilinear vector-valued spline. Input parameters: X - spline abscissas, array[0..N-1] Y - spline ordinates, array[0..M-1] F - function values, array[0..M*N*D-1]: * first D elements store D values at (X[0],Y[0]) * next D elements store D values at (X[1],Y[0]) * general form - D function values at (X[i],Y[j]) are stored at F[D*(J*N+I)...D*(J*N+I)+D-1]. 
M,N - grid size, M>=2, N>=2 D - vector dimension, D>=1 Output parameters: C - spline interpolant -- ALGLIB PROJECT -- Copyright 16.04.2012 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildbilinearv(double[] x, int n, double[] y, int m, double[] f, int d, spline2dinterpolant c, alglib.xparams _params) { double t = 0; int i = 0; int j = 0; int k = 0; int i0 = 0; alglib.ap.assert(n>=2, "Spline2DBuildBilinearV: N is less then 2"); alglib.ap.assert(m>=2, "Spline2DBuildBilinearV: M is less then 2"); alglib.ap.assert(d>=1, "Spline2DBuildBilinearV: invalid argument D (D<1)"); alglib.ap.assert(alglib.ap.len(x)>=n && alglib.ap.len(y)>=m, "Spline2DBuildBilinearV: length of X or Y is too short (Length(X/Y)=k, "Spline2DBuildBilinearV: length of F is too short (Length(F)=2, N>=2 D - vector dimension, D>=1 Output parameters: C - spline interpolant -- ALGLIB PROJECT -- Copyright 16.04.2012 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildbicubicv(double[] x, int n, double[] y, int m, double[] f, int d, spline2dinterpolant c, alglib.xparams _params) { double[,] tf = new double[0,0]; double[,] dx = new double[0,0]; double[,] dy = new double[0,0]; double[,] dxy = new double[0,0]; double t = 0; int i = 0; int j = 0; int k = 0; int di = 0; f = (double[])f.Clone(); alglib.ap.assert(n>=2, "Spline2DBuildBicubicV: N is less than 2"); alglib.ap.assert(m>=2, "Spline2DBuildBicubicV: M is less than 2"); alglib.ap.assert(d>=1, "Spline2DBuildBicubicV: invalid argument D (D<1)"); alglib.ap.assert(alglib.ap.len(x)>=n && alglib.ap.len(y)>=m, "Spline2DBuildBicubicV: length of X or Y is too short (Length(X/Y)=k, "Spline2DBuildBicubicV: length of F is too short (Length(F)=2, "Spline2DBuildBilinear: N<2"); alglib.ap.assert(m>=2, "Spline2DBuildBilinear: M<2"); alglib.ap.assert(alglib.ap.len(x)>=n && alglib.ap.len(y)>=m, "Spline2DBuildBilinear: length of X or Y is too short (Length(X/Y)=m && alglib.ap.cols(f)>=n, "Spline2DBuildBilinear: size of F is too small (rows(F)=2, "Spline2DBuildBicubicSpline: N<2"); alglib.ap.assert(m>=2, "Spline2DBuildBicubicSpline: M<2"); alglib.ap.assert(alglib.ap.len(x)>=n && alglib.ap.len(y)>=m, "Spline2DBuildBicubic: length of X or Y is too short (Length(X/Y)=m && alglib.ap.cols(f)>=n, "Spline2DBuildBicubic: size of F is too small (rows(F)1 for vector-valued spline fitting. OUTPUT PARAMETERS: S - solver object -- ALGLIB PROJECT -- Copyright 29.01.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildercreate(int d, spline2dbuilder state, alglib.xparams _params) { alglib.ap.assert(d>=1, "Spline2DBuilderCreate: D<=0"); // // NOTES: // // 1. Prior term is set to linear one (good default option) // 2. Solver is set to BlockLLS - good enough for small-scale problems. // 3. Refinement rounds: 5; enough to get good convergence. 
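//
// Illustrative usage sketch (not part of the original code; it assumes the
// usual parameterless constructors of the builder/spline/report objects and
// a caller-supplied alglib.xparams instance "_params"). The defaults
// assigned below correspond to spline2dbuildersetlinterm() (PriorTerm=1)
// and spline2dbuildersetalgoblocklls() (SolverType=1), so a minimal fitting
// session built on this object could look roughly like:
//
//     spline2dbuilder state = new spline2dbuilder();
//     spline2dinterpolant s = new spline2dinterpolant();
//     spline2dfitreport rep = new spline2dfitreport();
//     spline2dbuildercreate(1, state, _params);               // D=1, scalar spline
//     spline2dbuildersetpoints(state, xy, n, _params);        // XY is array[N,2+D]
//     spline2dbuildersetgrid(state, 32, 32, _params);         // 32x32 grid of nodes
//     spline2dbuildersetalgoblocklls(state, 1.0E-3, _params); // mild smoothing
//     spline2dfit(state, s, rep, _params);                    // fills S and Rep
//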
// state.priorterm = 1; state.priortermval = 0; state.areatype = 0; state.gridtype = 0; state.smoothing = 0.0; state.nlayers = 0; state.solvertype = 1; state.npoints = 0; state.d = d; state.sx = 1.0; state.sy = 1.0; state.lsqrcnt = 5; // // Algorithm settings // state.adddegreeoffreedom = true; state.maxcoresize = 16; state.interfacesize = 5; } /************************************************************************* This function sets constant prior term (model is a sum of bicubic spline and global prior, which can be linear, constant, user-defined constant or zero). Constant prior term is determined by least squares fitting. INPUT PARAMETERS: S - spline builder V - value for user-defined prior -- ALGLIB -- Copyright 01.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildersetuserterm(spline2dbuilder state, double v, alglib.xparams _params) { alglib.ap.assert(math.isfinite(v), "Spline2DBuilderSetUserTerm: infinite/NAN value passed"); state.priorterm = 0; state.priortermval = v; } /************************************************************************* This function sets linear prior term (model is a sum of bicubic spline and global prior, which can be linear, constant, user-defined constant or zero). Linear prior term is determined by least squares fitting. INPUT PARAMETERS: S - spline builder -- ALGLIB -- Copyright 01.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildersetlinterm(spline2dbuilder state, alglib.xparams _params) { state.priorterm = 1; } /************************************************************************* This function sets constant prior term (model is a sum of bicubic spline and global prior, which can be linear, constant, user-defined constant or zero). Constant prior term is determined by least squares fitting. INPUT PARAMETERS: S - spline builder -- ALGLIB -- Copyright 01.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildersetconstterm(spline2dbuilder state, alglib.xparams _params) { state.priorterm = 2; } /************************************************************************* This function sets zero prior term (model is a sum of bicubic spline and global prior, which can be linear, constant, user-defined constant or zero). INPUT PARAMETERS: S - spline builder -- ALGLIB -- Copyright 01.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildersetzeroterm(spline2dbuilder state, alglib.xparams _params) { state.priorterm = 3; } /************************************************************************* This function adds dataset to the builder object. This function overrides results of the previous calls, i.e. multiple calls of this function will result in only the last set being added. INPUT PARAMETERS: S - spline 2D builder object XY - points, array[N,2+D]. One row corresponds to one point in the dataset. First 2 elements are coordinates, next D elements are function values. Array may be larger than specified, in this case only leading [N,NX+NY] elements will be used. 
N - number of points in the dataset -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildersetpoints(spline2dbuilder state, double[,] xy, int n, alglib.xparams _params) { int i = 0; int j = 0; int ew = 0; alglib.ap.assert(n>0, "Spline2DBuilderSetPoints: N<0"); alglib.ap.assert(alglib.ap.rows(xy)>=n, "Spline2DBuilderSetPoints: Rows(XY)=2+state.d, "Spline2DBuilderSetPoints: Cols(XY)=XB"); alglib.ap.assert((double)(ya)<(double)(yb), "Spline2DBuilderSetArea: YA>=YB"); state.areatype = 1; state.xa = xa; state.xb = xb; state.ya = ya; state.yb = yb; } /************************************************************************* This function sets nodes count for 2D spline interpolant. Fitting is performed on area defined with one of the "setarea" functions; this one sets number of nodes placed upon the fitting area. INPUT PARAMETERS: S - spline 2D builder object KX - nodes count for the first (X) dimension; fitting interval [XA,XB] is separated into KX-1 subintervals, with KX nodes created at the boundaries. KY - nodes count for the first (Y) dimension; fitting interval [YA,YB] is separated into KY-1 subintervals, with KY nodes created at the boundaries. NOTE: at least 4 nodes is created in each dimension, so KX and KY are silently increased if needed. -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildersetgrid(spline2dbuilder state, int kx, int ky, alglib.xparams _params) { alglib.ap.assert(kx>0, "Spline2DBuilderSetGridSizePrecisely: KX<=0"); alglib.ap.assert(ky>0, "Spline2DBuilderSetGridSizePrecisely: KY<=0"); state.gridtype = 1; state.kx = Math.Max(kx, 4); state.ky = Math.Max(ky, 4); } /************************************************************************* This function allows you to choose least squares solver used to perform fitting. This function sets solver algorithm to "FastDDM", which performs fast parallel fitting by splitting problem into smaller chunks and merging results together. This solver is optimized for large-scale problems, starting from 256x256 grids, and up to 10000x10000 grids. Of course, it will work for smaller grids too. More detailed description of the algorithm is given below: * algorithm generates hierarchy of nested grids, ranging from ~16x16 (topmost "layer" of the model) to ~KX*KY one (final layer). Upper layers model global behavior of the function, lower layers are used to model fine details. Moving from layer to layer doubles grid density. * fitting is started from topmost layer, subsequent layers are fitted using residuals from previous ones. * user may choose to skip generation of upper layers and generate only a few bottom ones, which will result in much better performance and parallelization efficiency, at the cost of algorithm inability to "patch" large holes in the dataset. * every layer is regularized using progressively increasing regularization coefficient; thus, increasing LambdaV penalizes fine details first, leaving lower frequencies almost intact for a while. * after fitting is done, all layers are merged together into one bicubic spline IMPORTANT: regularization coefficient used by this solver is different from the one used by BlockLLS. 
Latter utilizes nonlinearity penalty, which is global in nature (large regularization results in global linear trend being extracted); this solver uses another, localized form of penalty, which is suitable for parallel processing. Notes on memory and performance: * memory requirements: most memory is consumed during modeling of the higher layers; ~[512*NPoints] bytes is required for a model with full hierarchy of grids being generated. However, if you skip a few topmost layers, you will get nearly constant (wrt. points count and grid size) memory consumption. * serial running time: O(K*K)+O(NPoints) for a KxK grid * parallelism potential: good. You may get nearly linear speed-up when performing fitting with just a few layers. Adding more layers results in model becoming more global, which somewhat reduces efficiency of the parallel code. ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: S - spline 2D builder object NLayers - number of layers in the model: * NLayers>=1 means that up to chosen number of bottom layers is fitted * NLayers=0 means that maximum number of layers is chosen (according to current grid size) * NLayers<=-1 means that up to |NLayers| topmost layers is skipped Recommendations: * good "default" value is 2 layers * you may need more layers, if your dataset is very irregular and you want to "patch" large holes. For a grid step H (equal to AreaWidth/GridSize) you may expect that last layer reproduces variations at distance H (and can patch holes that wide); that higher layers operate at distances 2*H, 4*H, 8*H and so on. * good value for "bullletproof" mode is NLayers=0, which results in complete hierarchy of layers being generated. LambdaV - regularization coefficient, chosen in such a way that it penalizes bottom layers (fine details) first. LambdaV>=0, zero value means that no penalty is applied. -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildersetalgofastddm(spline2dbuilder state, int nlayers, double lambdav, alglib.xparams _params) { alglib.ap.assert(math.isfinite(lambdav), "Spline2DBuilderSetAlgoFastDDM: LambdaV is not finite value"); alglib.ap.assert((double)(lambdav)>=(double)(0), "Spline2DBuilderSetAlgoFastDDM: LambdaV<0"); state.solvertype = 3; state.nlayers = nlayers; state.smoothing = lambdav; } /************************************************************************* This function allows you to choose least squares solver used to perform fitting. This function sets solver algorithm to "BlockLLS", which performs least squares fitting with fast sparse direct solver, with optional nonsmoothness penalty being applied. Nonlinearity penalty has the following form: [ ] P() ~ Lambda* integral[ (d2S/dx2)^2 + 2*(d2S/dxdy)^2 + (d2S/dy2)^2 ]dxdy [ ] here integral is calculated over entire grid, and "~" means "proportional" because integral is normalized after calcilation. 
Extremely large values of Lambda result in linear fit being performed. NOTE: this algorithm is the most robust and controllable one, but it is limited by 512x512 grids and (say) up to 1.000.000 points. However, ALGLIB has one more spline solver: FastDDM algorithm, which is intended for really large-scale problems (in 10M-100M range). FastDDM algorithm also has better parallelism properties. More information on BlockLLS solver: * memory requirements: ~[32*K^3+256*NPoints] bytes for KxK grid with NPoints-sized dataset * serial running time: O(K^4+NPoints) * parallelism potential: limited. You may get some sublinear gain when working with large grids (K's in 256..512 range) ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: S - spline 2D builder object LambdaNS- non-negative value: * positive value means that some smoothing is applied * zero value means that no smoothing is applied, and corresponding entries of design matrix are numerically zero and dropped from consideration. -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildersetalgoblocklls(spline2dbuilder state, double lambdans, alglib.xparams _params) { alglib.ap.assert(math.isfinite(lambdans), "Spline2DBuilderSetAlgoBlockLLS: LambdaNS is not finite value"); alglib.ap.assert((double)(lambdans)>=(double)(0), "Spline2DBuilderSetAlgoBlockLLS: LambdaNS<0"); state.solvertype = 1; state.smoothing = lambdans; } /************************************************************************* This function allows you to choose least squares solver used to perform fitting. This function sets solver algorithm to "NaiveLLS". IMPORTANT: NaiveLLS is NOT intended to be used in real life code! This algorithm solves problem by generated dense (K^2)x(K^2+NPoints) matrix and solves linear least squares problem with dense solver. It is here just to test BlockLLS against reference solver (and maybe for someone trying to compare well optimized solver against straightforward approach to the LLS problem). More information on naive LLS solver: * memory requirements: ~[8*K^4+256*NPoints] bytes for KxK grid. * serial running time: O(K^6+NPoints) for KxK grid * when compared with BlockLLS, NaiveLLS has ~K larger memory demand and ~K^2 larger running time. 
INPUT PARAMETERS: S - spline 2D builder object LambdaNS- nonsmoothness penalty -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dbuildersetalgonaivells(spline2dbuilder state, double lambdans, alglib.xparams _params) { alglib.ap.assert(math.isfinite(lambdans), "Spline2DBuilderSetAlgoBlockLLS: LambdaNS is not finite value"); alglib.ap.assert((double)(lambdans)>=(double)(0), "Spline2DBuilderSetAlgoBlockLLS: LambdaNS<0"); state.solvertype = 2; state.smoothing = lambdans; } /************************************************************************* This function fits bicubic spline to current dataset, using current area/ grid and current LLS solver. ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! * hardware vendor (Intel) implementations of linear algebra primitives ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: State - spline 2D builder object OUTPUT PARAMETERS: S - 2D spline, fit result Rep - fitting report, which provides some additional info about errors, R2 coefficient and so on. -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dfit(spline2dbuilder state, spline2dinterpolant s, spline2dfitreport rep, alglib.xparams _params) { double xa = 0; double xb = 0; double ya = 0; double yb = 0; double xaraw = 0; double xbraw = 0; double yaraw = 0; double ybraw = 0; int kx = 0; int ky = 0; double hx = 0; double hy = 0; double invhx = 0; double invhy = 0; int gridexpansion = 0; int nzwidth = 0; int bfrad = 0; int npoints = 0; int d = 0; int ew = 0; int i = 0; int j = 0; int k = 0; double v = 0; int k0 = 0; int k1 = 0; double vx = 0; double vy = 0; int arows = 0; int acopied = 0; int basecasex = 0; int basecasey = 0; double eps = 0; double[] xywork = new double[0]; double[,] vterm = new double[0,0]; double[] tmpx = new double[0]; double[] tmpy = new double[0]; double[] tmp0 = new double[0]; double[] tmp1 = new double[0]; double[] meany = new double[0]; int[] xyindex = new int[0]; int[] tmpi = new int[0]; spline1d.spline1dinterpolant basis1 = new spline1d.spline1dinterpolant(); sparse.sparsematrix av = new sparse.sparsematrix(); sparse.sparsematrix ah = new sparse.sparsematrix(); spline2dxdesignmatrix xdesignmatrix = new spline2dxdesignmatrix(); double[] z = new double[0]; spline2dblockllsbuf blockllsbuf = new spline2dblockllsbuf(); int sfx = 0; int sfy = 0; int sfxy = 0; double tss = 0; int dstidx = 0; nzwidth = 4; bfrad = 2; npoints = state.npoints; d = state.d; ew = 2+d; // // Integrity checks // alglib.ap.assert((double)(state.sx)==(double)(1), "Spline2DFit: integrity error"); alglib.ap.assert((double)(state.sy)==(double)(1), "Spline2DFit: integrity error"); // // Determine actual area size and grid step // // NOTE: initialize vars by zeros in order to avoid spurious // compiler warnings. 
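//
// NOTE (added summary of the code below): with AreaType=0 the fitting area
// is the bounding box of the dataset (or [-1,+1]x[-1,+1] if the dataset is
// empty); with AreaType=1 the user-specified [XA,XB]x[YA,YB] is used.
// Degenerate intervals are widened: for XA=XB=V with V>=0 the interval
// becomes [V/2-1, V*2+1], so V=4, for instance, turns into [1,9].
//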
// xa = 0; xb = 0; ya = 0; yb = 0; if( state.areatype==0 ) { if( npoints>0 ) { xa = state.xy[0]; xb = state.xy[0]; ya = state.xy[1]; yb = state.xy[1]; for(i=1; i<=npoints-1; i++) { xa = Math.Min(xa, state.xy[i*ew+0]); xb = Math.Max(xb, state.xy[i*ew+0]); ya = Math.Min(ya, state.xy[i*ew+1]); yb = Math.Max(yb, state.xy[i*ew+1]); } } else { xa = -1; xb = 1; ya = -1; yb = 1; } } else { if( state.areatype==1 ) { xa = state.xa; xb = state.xb; ya = state.ya; yb = state.yb; } else { alglib.ap.assert(false); } } if( (double)(xa)==(double)(xb) ) { v = xa; if( (double)(v)>=(double)(0) ) { xa = v/2-1; xb = v*2+1; } else { xa = v*2-1; xb = v/2+1; } } if( (double)(ya)==(double)(yb) ) { v = ya; if( (double)(v)>=(double)(0) ) { ya = v/2-1; yb = v*2+1; } else { ya = v*2-1; yb = v/2+1; } } alglib.ap.assert((double)(xa)<(double)(xb), "Spline2DFit: integrity error"); alglib.ap.assert((double)(ya)<(double)(yb), "Spline2DFit: integrity error"); kx = 0; ky = 0; if( state.gridtype==0 ) { kx = 4; ky = 4; } else { if( state.gridtype==1 ) { kx = state.kx; ky = state.ky; } else { alglib.ap.assert(false); } } alglib.ap.assert(kx>0, "Spline2DFit: integrity error"); alglib.ap.assert(ky>0, "Spline2DFit: integrity error"); basecasex = -1; basecasey = -1; if( state.solvertype==3 ) { // // Large-scale solver with special requirements to grid size. // kx = Math.Max(kx, nzwidth); ky = Math.Max(ky, nzwidth); k = 1; while( apserv.imin2(kx, ky, _params)>state.maxcoresize+1 ) { kx = apserv.idivup(kx-1, 2, _params)+1; ky = apserv.idivup(ky-1, 2, _params)+1; k = k+1; } basecasex = kx-1; k0 = 1; while( kx>state.maxcoresize+1 ) { basecasex = apserv.idivup(kx-1, 2, _params); kx = basecasex+1; k0 = k0+1; } while( k0>1 ) { kx = (kx-1)*2+1; k0 = k0-1; } basecasey = ky-1; k1 = 1; while( ky>state.maxcoresize+1 ) { basecasey = apserv.idivup(ky-1, 2, _params); ky = basecasey+1; k1 = k1+1; } while( k1>1 ) { ky = (ky-1)*2+1; k1 = k1-1; } while( k>1 ) { kx = (kx-1)*2+1; ky = (ky-1)*2+1; k = k-1; } // // Grid is NOT expanded. We have very strict requirements on // grid size, and we do not want to overcomplicate it by // playing with grid size in order to add one more degree of // freedom. It is not relevant for such large tasks. // gridexpansion = 0; } else { // // Medium-scale solvers which are tolerant to grid size. // kx = Math.Max(kx, nzwidth); ky = Math.Max(ky, nzwidth); // // Grid is expanded by 1 in order to add one more effective degree // of freedom to the spline. Having additional nodes outside of the // area allows us to emulate changes in the derivative at the bound // without having specialized "boundary" version of the basis function. // if( state.adddegreeoffreedom ) { gridexpansion = 1; } else { gridexpansion = 0; } } hx = apserv.coalesce(xb-xa, 1.0, _params)/(kx-1); hy = apserv.coalesce(yb-ya, 1.0, _params)/(ky-1); invhx = 1/hx; invhy = 1/hy; // // We determined "raw" grid size. Now perform a grid correction according // to current grid expansion size. 
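//
// NOTE (illustrative example, numbers are hypothetical): with
// GridExpansion=1 the area is padded by one grid step on each side and the
// node counts grow by 2, e.g. KX=4 nodes on [0,3] (HX=1) become KX=6 nodes
// on [-1,4]; the grid step HX itself stays unchanged.
//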
// xaraw = xa; yaraw = ya; xbraw = xb; ybraw = yb; xa = xa-hx*gridexpansion; ya = ya-hy*gridexpansion; xb = xb+hx*gridexpansion; yb = yb+hy*gridexpansion; kx = kx+2*gridexpansion; ky = ky+2*gridexpansion; // // Create output spline using transformed (unit-scale) // coordinates, fill by zero values // s.d = d; s.n = kx; s.m = ky; s.stype = -3; sfx = s.n*s.m*d; sfy = 2*s.n*s.m*d; sfxy = 3*s.n*s.m*d; s.x = new double[s.n]; s.y = new double[s.m]; s.f = new double[4*s.n*s.m*d]; for(i=0; i<=s.n-1; i++) { s.x[i] = i; } for(i=0; i<=s.m-1; i++) { s.y[i] = i; } for(i=0; i<=4*s.n*s.m*d-1; i++) { s.f[i] = 0.0; } // // Create local copy of dataset (only points in the grid are copied; // we allow small step out of the grid, by Eps*H, in order to deal // with numerical rounding errors). // // An additional copy of Y-values is created at columns beyond 2+J; // it is preserved during all transformations. This copy is used // to calculate error-related metrics. // // Calculate mean(Y), TSS // meany = new double[d]; for(j=0; j<=d-1; j++) { meany[j] = 0; } apserv.rvectorsetlengthatleast(ref xywork, npoints*ew, _params); acopied = 0; eps = 1.0E-6; for(i=0; i<=npoints-1; i++) { vx = state.xy[i*ew+0]; vy = state.xy[i*ew+1]; if( (((double)(xaraw-eps*hx)<=(double)(vx) && (double)(vx)<=(double)(xbraw+eps*hx)) && (double)(yaraw-eps*hy)<=(double)(vy)) && (double)(vy)<=(double)(ybraw+eps*hy) ) { xywork[acopied*ew+0] = (vx-xa)*invhx; xywork[acopied*ew+1] = (vy-ya)*invhy; for(j=0; j<=d-1; j++) { v = state.xy[i*ew+2+j]; xywork[acopied*ew+2+j] = v; meany[j] = meany[j]+v; } acopied = acopied+1; } } npoints = acopied; for(j=0; j<=d-1; j++) { meany[j] = meany[j]/apserv.coalesce(npoints, 1, _params); } tss = 0.0; for(i=0; i<=npoints-1; i++) { for(j=0; j<=d-1; j++) { tss = tss+math.sqr(xywork[i*ew+2+j]-meany[j]); } } tss = apserv.coalesce(tss, 1.0, _params); // // Handle prior term. // Modify output spline. // Quick exit if dataset is empty. // intfitserv.buildpriorterm1(xywork, npoints, 2, d, state.priorterm, state.priortermval, ref vterm, _params); if( npoints==0 ) { // // Quick exit // for(k=0; k<=s.n*s.m-1; k++) { k0 = k%s.n; k1 = k/s.n; for(j=0; j<=d-1; j++) { dstidx = d*(k1*s.n+k0)+j; s.f[dstidx] = s.f[dstidx]+vterm[j,0]*s.x[k0]+vterm[j,1]*s.y[k1]+vterm[j,2]; s.f[sfx+dstidx] = s.f[sfx+dstidx]+vterm[j,0]; s.f[sfy+dstidx] = s.f[sfy+dstidx]+vterm[j,1]; } } for(i=0; i<=s.n-1; i++) { s.x[i] = s.x[i]*hx+xa; } for(i=0; i<=s.m-1; i++) { s.y[i] = s.y[i]*hy+ya; } for(i=0; i<=s.n*s.m*d-1; i++) { s.f[sfx+i] = s.f[sfx+i]*invhx; s.f[sfy+i] = s.f[sfy+i]*invhy; s.f[sfxy+i] = s.f[sfxy+i]*invhx*invhy; } rep.rmserror = 0; rep.avgerror = 0; rep.maxerror = 0; rep.r2 = 1.0; return; } // // Build 1D compact basis function // Generate design matrix // tmpx = new double[7]; tmpy = new double[7]; tmpx[0] = -3; tmpx[1] = -2; tmpx[2] = -1; tmpx[3] = 0; tmpx[4] = 1; tmpx[5] = 2; tmpx[6] = 3; tmpy[0] = 0; tmpy[1] = 0; tmpy[2] = (double)1/(double)12; tmpy[3] = (double)2/(double)6; tmpy[4] = (double)1/(double)12; tmpy[5] = 0; tmpy[6] = 0; spline1d.spline1dbuildcubic(tmpx, tmpy, alglib.ap.len(tmpx), 2, 0.0, 2, 0.0, basis1, _params); // // Solve. // Update spline. 
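//
// NOTE (added summary of the dispatch below): SolverType=1 selects the
// BlockLLS path (reorder dataset, build the X-design matrix, BlockLLSFit,
// then UpdateSplineTable), SolverType=2 selects the NaiveLLS reference
// path, and SolverType=3 selects the FastDDM solver. All three paths fill
// S.F with coefficients in unit-scale coordinates; the prior term and the
// back transformation to original coordinates are applied after this block.
//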
// if( state.solvertype==1 ) { // // BlockLLS // reorderdatasetandbuildindex(xywork, npoints, d, tmp0, 0, kx, ky, ref xyindex, ref tmpi, _params); xdesigngenerate(xywork, xyindex, 0, kx, kx, 0, ky, ky, d, lambdaregblocklls, state.smoothing, basis1, xdesignmatrix, _params); blockllsfit(xdesignmatrix, state.lsqrcnt, ref z, rep, tss, blockllsbuf, _params); updatesplinetable(z, kx, ky, d, basis1, bfrad, s.f, s.m, s.n, 1, _params); } else { if( state.solvertype==2 ) { // // NaiveLLS, reference implementation // generatedesignmatrix(xywork, npoints, d, kx, ky, state.smoothing, lambdaregblocklls, basis1, av, ah, ref arows, _params); naivellsfit(av, ah, arows, xywork, kx, ky, npoints, d, state.lsqrcnt, ref z, rep, tss, _params); updatesplinetable(z, kx, ky, d, basis1, bfrad, s.f, s.m, s.n, 1, _params); } else { if( state.solvertype==3 ) { // // FastDDM method // alglib.ap.assert(basecasex>0, "Spline2DFit: integrity error"); alglib.ap.assert(basecasey>0, "Spline2DFit: integrity error"); fastddmfit(xywork, npoints, d, kx, ky, basecasex, basecasey, state.maxcoresize, state.interfacesize, state.nlayers, state.smoothing, state.lsqrcnt, basis1, s, rep, tss, _params); } else { alglib.ap.assert(false, "Spline2DFit: integrity error"); } } } // // Append prior term. // Transform spline to original coordinates // for(k=0; k<=s.n*s.m-1; k++) { k0 = k%s.n; k1 = k/s.n; for(j=0; j<=d-1; j++) { dstidx = d*(k1*s.n+k0)+j; s.f[dstidx] = s.f[dstidx]+vterm[j,0]*s.x[k0]+vterm[j,1]*s.y[k1]+vterm[j,2]; s.f[sfx+dstidx] = s.f[sfx+dstidx]+vterm[j,0]; s.f[sfy+dstidx] = s.f[sfy+dstidx]+vterm[j,1]; } } for(i=0; i<=s.n-1; i++) { s.x[i] = s.x[i]*hx+xa; } for(i=0; i<=s.m-1; i++) { s.y[i] = s.y[i]*hy+ya; } for(i=0; i<=s.n*s.m*d-1; i++) { s.f[sfx+i] = s.f[sfx+i]*invhx; s.f[sfy+i] = s.f[sfy+i]*invhy; s.f[sfxy+i] = s.f[sfxy+i]*invhx*invhy; } } /************************************************************************* Serializer: allocation -- ALGLIB -- Copyright 28.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dalloc(alglib.serializer s, spline2dinterpolant spline, alglib.xparams _params) { // // Header // s.alloc_entry(); // // Data // s.alloc_entry(); s.alloc_entry(); s.alloc_entry(); s.alloc_entry(); apserv.allocrealarray(s, spline.x, -1, _params); apserv.allocrealarray(s, spline.y, -1, _params); apserv.allocrealarray(s, spline.f, -1, _params); } /************************************************************************* Serializer: serialization -- ALGLIB -- Copyright 28.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dserialize(alglib.serializer s, spline2dinterpolant spline, alglib.xparams _params) { // // Header // s.serialize_int(scodes.getspline2dserializationcode(_params)); // // Data // s.serialize_int(spline.stype); s.serialize_int(spline.n); s.serialize_int(spline.m); s.serialize_int(spline.d); apserv.serializerealarray(s, spline.x, -1, _params); apserv.serializerealarray(s, spline.y, -1, _params); apserv.serializerealarray(s, spline.f, -1, _params); } /************************************************************************* Serializer: unserialization -- ALGLIB -- Copyright 28.02.2018 by Bochkanov Sergey *************************************************************************/ public static void spline2dunserialize(alglib.serializer s, spline2dinterpolant spline, alglib.xparams _params) { int scode = 0; // // Header // scode = s.unserialize_int(); 
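//
// NOTE (descriptive, mirrors spline2dserialize above): the stream stores
// the header code first, then SType, N, M, D, then the X[], Y[] and F[]
// real arrays; unserialization below reads the fields back in exactly the
// same order.
//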
alglib.ap.assert(scode==scodes.getspline2dserializationcode(_params), "Spline2DUnserialize: stream header corrupted"); // // Data // spline.stype = s.unserialize_int(); spline.n = s.unserialize_int(); spline.m = s.unserialize_int(); spline.d = s.unserialize_int(); apserv.unserializerealarray(s, ref spline.x, _params); apserv.unserializerealarray(s, ref spline.y, _params); apserv.unserializerealarray(s, ref spline.f, _params); } /************************************************************************* Internal subroutine. Calculation of the first derivatives and the cross-derivative. *************************************************************************/ private static void bicubiccalcderivatives(double[,] a, double[] x, double[] y, int m, int n, ref double[,] dx, ref double[,] dy, ref double[,] dxy, alglib.xparams _params) { int i = 0; int j = 0; double[] xt = new double[0]; double[] ft = new double[0]; double s = 0; double ds = 0; double d2s = 0; spline1d.spline1dinterpolant c = new spline1d.spline1dinterpolant(); dx = new double[0,0]; dy = new double[0,0]; dxy = new double[0,0]; dx = new double[m, n]; dy = new double[m, n]; dxy = new double[m, n]; // // dF/dX // xt = new double[n]; ft = new double[n]; for(i=0; i<=m-1; i++) { for(j=0; j<=n-1; j++) { xt[j] = x[j]; ft[j] = a[i,j]; } spline1d.spline1dbuildcubic(xt, ft, n, 0, 0.0, 0, 0.0, c, _params); for(j=0; j<=n-1; j++) { spline1d.spline1ddiff(c, x[j], ref s, ref ds, ref d2s, _params); dx[i,j] = ds; } } // // dF/dY // xt = new double[m]; ft = new double[m]; for(j=0; j<=n-1; j++) { for(i=0; i<=m-1; i++) { xt[i] = y[i]; ft[i] = a[i,j]; } spline1d.spline1dbuildcubic(xt, ft, m, 0, 0.0, 0, 0.0, c, _params); for(i=0; i<=m-1; i++) { spline1d.spline1ddiff(c, y[i], ref s, ref ds, ref d2s, _params); dy[i,j] = ds; } } // // d2F/dXdY // xt = new double[n]; ft = new double[n]; for(i=0; i<=m-1; i++) { for(j=0; j<=n-1; j++) { xt[j] = x[j]; ft[j] = dy[i,j]; } spline1d.spline1dbuildcubic(xt, ft, n, 0, 0.0, 0, 0.0, c, _params); for(j=0; j<=n-1; j++) { spline1d.spline1ddiff(c, x[j], ref s, ref ds, ref d2s, _params); dxy[i,j] = ds; } } } /************************************************************************* This function generates design matrix for the problem (in fact, two design matrices are generated: "vertical" one and transposed (horizontal) one. INPUT PARAMETERS: XY - array[NPoints*(2+D)]; dataset after scaling in such way that grid step is equal to 1.0 in both dimensions. 
NPoints - dataset size, NPoints>=1 KX, KY - grid size, KX,KY>=4 Smoothing - nonlinearity penalty coefficient, >=0 LambdaReg - regularization coefficient, >=0 Basis1 - basis spline, expected to be non-zero only at [-2,+2] AV, AH - possibly preallocated buffers OUTPUT PARAMETERS: AV - sparse matrix[ARows,KX*KY]; design matrix AH - transpose of AV ARows - number of rows in design matrix -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ private static void generatedesignmatrix(double[] xy, int npoints, int d, int kx, int ky, double smoothing, double lambdareg, spline1d.spline1dinterpolant basis1, sparse.sparsematrix av, sparse.sparsematrix ah, ref int arows, alglib.xparams _params) { int nzwidth = 0; int nzshift = 0; int ew = 0; int i = 0; int j0 = 0; int j1 = 0; int k0 = 0; int k1 = 0; int dstidx = 0; double v = 0; double v0 = 0; double v1 = 0; double v2 = 0; double w0 = 0; double w1 = 0; double w2 = 0; int[] crx = new int[0]; int[] cry = new int[0]; int[] nrs = new int[0]; double[,] d2x = new double[0,0]; double[,] d2y = new double[0,0]; double[,] dxy = new double[0,0]; arows = 0; nzwidth = 4; nzshift = 1; alglib.ap.assert(npoints>0, "Spline2DFit: integrity check failed"); alglib.ap.assert(kx>=nzwidth, "Spline2DFit: integrity check failed"); alglib.ap.assert(ky>=nzwidth, "Spline2DFit: integrity check failed"); ew = 2+d; // // Determine canonical rectangle for every point. Every point of the dataset is // influenced by at most NZWidth*NZWidth basis functions, which form NZWidth*NZWidth // canonical rectangle. // // Thus, we have (KX-NZWidth+1)*(KY-NZWidth+1) overlapping canonical rectangles. // Assigning every point to its rectangle simplifies creation of sparse basis // matrix at the next steps. // crx = new int[npoints]; cry = new int[npoints]; for(i=0; i<=npoints-1; i++) { crx[i] = apserv.iboundval((int)Math.Floor(xy[i*ew+0])-nzshift, 0, kx-nzwidth, _params); cry[i] = apserv.iboundval((int)Math.Floor(xy[i*ew+1])-nzshift, 0, ky-nzwidth, _params); } // // Create vertical and horizontal design matrices // arows = npoints+kx*ky; if( (double)(smoothing)!=(double)(0.0) ) { alglib.ap.assert((double)(smoothing)>(double)(0.0), "Spline2DFit: integrity check failed"); arows = arows+3*(kx-2)*(ky-2); } nrs = new int[arows]; dstidx = 0; for(i=0; i<=npoints-1; i++) { nrs[dstidx+i] = nzwidth*nzwidth; } dstidx = dstidx+npoints; for(i=0; i<=kx*ky-1; i++) { nrs[dstidx+i] = 1; } dstidx = dstidx+kx*ky; if( (double)(smoothing)!=(double)(0.0) ) { for(i=0; i<=3*(kx-2)*(ky-2)-1; i++) { nrs[dstidx+i] = 3*3; } dstidx = dstidx+3*(kx-2)*(ky-2); } alglib.ap.assert(dstidx==arows, "Spline2DFit: integrity check failed"); sparse.sparsecreatecrs(arows, kx*ky, nrs, av, _params); dstidx = 0; for(i=0; i<=npoints-1; i++) { for(j1=0; j1<=nzwidth-1; j1++) { for(j0=0; j0<=nzwidth-1; j0++) { v0 = spline1d.spline1dcalc(basis1, xy[i*ew+0]-(crx[i]+j0), _params); v1 = spline1d.spline1dcalc(basis1, xy[i*ew+1]-(cry[i]+j1), _params); sparse.sparseset(av, dstidx+i, (cry[i]+j1)*kx+(crx[i]+j0), v0*v1, _params); } } } dstidx = dstidx+npoints; for(i=0; i<=kx*ky-1; i++) { sparse.sparseset(av, dstidx+i, i, lambdareg, _params); } dstidx = dstidx+kx*ky; if( (double)(smoothing)!=(double)(0.0) ) { // // Smoothing is applied. Because all grid nodes are same, // we apply same smoothing kernel, which is calculated only // once at the beginning of design matrix generation. 
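//
// NOTE (added explanation): D2X/D2Y/DXY below are 3x3 kernels built from
// the derivatives of the 1D basis spline at offsets -1,0,+1, so every
// smoothing row has 3*3=9 nonzero entries (this matches the NRS[] sizing
// above). The cross-derivative rows are later scaled by Sqrt(2)*Smoothing,
// so that their squared contribution to the residual reproduces the
// 2*(d2S/dxdy)^2 term of the nonsmoothness penalty.
//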
// d2x = new double[3, 3]; d2y = new double[3, 3]; dxy = new double[3, 3]; for(j1=0; j1<=2; j1++) { for(j0=0; j0<=2; j0++) { d2x[j0,j1] = 0.0; d2y[j0,j1] = 0.0; dxy[j0,j1] = 0.0; } } for(k1=0; k1<=2; k1++) { for(k0=0; k0<=2; k0++) { spline1d.spline1ddiff(basis1, -(k0-1), ref v0, ref v1, ref v2, _params); spline1d.spline1ddiff(basis1, -(k1-1), ref w0, ref w1, ref w2, _params); d2x[k0,k1] = d2x[k0,k1]+v2*w0; d2y[k0,k1] = d2y[k0,k1]+w2*v0; dxy[k0,k1] = dxy[k0,k1]+v1*w1; } } // // Now, kernel is ready - apply it to all inner nodes of the grid. // for(j1=1; j1<=ky-2; j1++) { for(j0=1; j0<=kx-2; j0++) { // // d2F/dx2 term // v = smoothing; for(k1=-1; k1<=1; k1++) { for(k0=-1; k0<=1; k0++) { sparse.sparseset(av, dstidx, (j1+k1)*kx+(j0+k0), v*d2x[1+k0,1+k1], _params); } } dstidx = dstidx+1; // // d2F/dy2 term // v = smoothing; for(k1=-1; k1<=1; k1++) { for(k0=-1; k0<=1; k0++) { sparse.sparseset(av, dstidx, (j1+k1)*kx+(j0+k0), v*d2y[1+k0,1+k1], _params); } } dstidx = dstidx+1; // // 2*d2F/dxdy term // v = Math.Sqrt(2)*smoothing; for(k1=-1; k1<=1; k1++) { for(k0=-1; k0<=1; k0++) { sparse.sparseset(av, dstidx, (j1+k1)*kx+(j0+k0), v*dxy[1+k0,1+k1], _params); } } dstidx = dstidx+1; } } } alglib.ap.assert(dstidx==arows, "Spline2DFit: integrity check failed"); sparse.sparsecopy(av, ah, _params); sparse.sparsetransposecrs(ah, _params); } /************************************************************************* This function updates table of spline values/derivatives using coefficients for a layer of basis functions. -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ private static void updatesplinetable(double[] z, int kx, int ky, int d, spline1d.spline1dinterpolant basis1, int bfrad, double[] ftbl, int m, int n, int scalexy, alglib.xparams _params) { int k = 0; int k0 = 0; int k1 = 0; int j = 0; int j0 = 0; int j1 = 0; int j0a = 0; int j0b = 0; int j1a = 0; int j1b = 0; double v = 0; double v0 = 0; double v1 = 0; double v01 = 0; double v11 = 0; double rdummy = 0; int dstidx = 0; int sfx = 0; int sfy = 0; int sfxy = 0; double invscalexy = 0; alglib.ap.assert(n==(kx-1)*scalexy+1, "Spline2DFit.UpdateSplineTable: integrity check failed"); alglib.ap.assert(m==(ky-1)*scalexy+1, "Spline2DFit.UpdateSplineTable: integrity check failed"); invscalexy = (double)1/(double)scalexy; sfx = n*m*d; sfy = 2*n*m*d; sfxy = 3*n*m*d; for(k=0; k<=kx*ky-1; k++) { k0 = k%kx; k1 = k/kx; j0a = apserv.iboundval(k0*scalexy-(bfrad*scalexy-1), 0, n-1, _params); j0b = apserv.iboundval(k0*scalexy+(bfrad*scalexy-1), 0, n-1, _params); j1a = apserv.iboundval(k1*scalexy-(bfrad*scalexy-1), 0, m-1, _params); j1b = apserv.iboundval(k1*scalexy+(bfrad*scalexy-1), 0, m-1, _params); for(j1=j1a; j1<=j1b; j1++) { spline1d.spline1ddiff(basis1, (j1-k1*scalexy)*invscalexy, ref v1, ref v11, ref rdummy, _params); v11 = v11*invscalexy; for(j0=j0a; j0<=j0b; j0++) { spline1d.spline1ddiff(basis1, (j0-k0*scalexy)*invscalexy, ref v0, ref v01, ref rdummy, _params); v01 = v01*invscalexy; for(j=0; j<=d-1; j++) { dstidx = d*(j1*n+j0)+j; v = z[j*kx*ky+k]; ftbl[dstidx] = ftbl[dstidx]+v0*v1*v; ftbl[sfx+dstidx] = ftbl[sfx+dstidx]+v01*v1*v; ftbl[sfy+dstidx] = ftbl[sfy+dstidx]+v0*v11*v; ftbl[sfxy+dstidx] = ftbl[sfxy+dstidx]+v01*v11*v; } } } } } /************************************************************************* This function performs fitting with FastDDM solver. Internal function, never use it directly. 
INPUT PARAMETERS: XY - array[NPoints*(2+D)], dataset; destroyed in process KX, KY - grid size TileSize - tile size InterfaceSize- interface size NPoints - points count D - number of components in vector-valued spline, D>=1 LSQRCnt - number of iterations, non-zero: * LSQRCnt>0 means that specified amount of preconditioned LSQR iterations will be performed to solve problem; usually we need 2..5 its. Recommended option - best convergence and stability/quality. * LSQRCnt<0 means that instead of LSQR we use iterative refinement on normal equations. Again, 2..5 its is enough. Basis1 - basis spline, expected to be non-zero only at [-2,+2] Z - possibly preallocated buffer for solution Residuals - possibly preallocated buffer for residuals at dataset points Rep - report structure; fields which are not set by this function are left intact TSS - total sum of squares; used to calculate R2 OUTPUT PARAMETERS: XY - destroyed in process Z - array[KX*KY*D], filled by solution; KX*KY coefficients corresponding to each of D dimensions are stored contiguously. Rep - following fields are set: * Rep.RMSError * Rep.AvgError * Rep.MaxError * Rep.R2 -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ private static void fastddmfit(double[] xy, int npoints, int d, int kx, int ky, int basecasex, int basecasey, int maxcoresize, int interfacesize, int nlayers, double smoothing, int lsqrcnt, spline1d.spline1dinterpolant basis1, spline2dinterpolant spline, spline2dfitreport rep, double tss, alglib.xparams _params) { int i = 0; int j = 0; int nzwidth = 0; int xew = 0; int ntotallayers = 0; int scaleidx = 0; int scalexy = 0; double invscalexy = 0; int kxcur = 0; int kycur = 0; int tilescount0 = 0; int tilescount1 = 0; double v = 0; double rss = 0; double[] yraw = new double[0]; int[] xyindex = new int[0]; double[] tmp0 = new double[0]; int[] bufi = new int[0]; spline2dfastddmbuf seed = new spline2dfastddmbuf(); alglib.smp.shared_pool pool = new alglib.smp.shared_pool(); spline2dxdesignmatrix xdesignmatrix = new spline2dxdesignmatrix(); spline2dblockllsbuf blockllsbuf = new spline2dblockllsbuf(); spline2dfitreport dummyrep = new spline2dfitreport(); // // Dataset metrics and integrity checks // nzwidth = 4; xew = 2+d; alglib.ap.assert(maxcoresize>=2, "Spline2DFit: integrity check failed"); alglib.ap.assert(interfacesize>=1, "Spline2DFit: integrity check failed"); alglib.ap.assert(kx>=nzwidth, "Spline2DFit: integrity check failed"); alglib.ap.assert(ky>=nzwidth, "Spline2DFit: integrity check failed"); // // Verify consistency of the grid size (KX,KY) with basecase sizes. // Determine full number of layers. 
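//
// NOTE (illustrative example, numbers are hypothetical): the loop below
// keeps halving the grid (KXCur=(KXCur-1)/2+1) while both dimensions exceed
// Basecase+1. For KX=KY=65 and BasecaseX=BasecaseY=16 this gives
// 65 -> 33 -> 17, i.e. NTotalLayers=3 and ScaleXY=4, with the coarsest
// layer being exactly (BasecaseX+1)x(BasecaseY+1) nodes.
//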
// alglib.ap.assert(basecasex<=maxcoresize, "Spline2DFit: integrity error"); alglib.ap.assert(basecasey<=maxcoresize, "Spline2DFit: integrity error"); ntotallayers = 1; scalexy = 1; kxcur = kx; kycur = ky; while( kxcur>basecasex+1 && kycur>basecasey+1 ) { alglib.ap.assert(kxcur%2==1, "Spline2DFit: integrity error"); alglib.ap.assert(kycur%2==1, "Spline2DFit: integrity error"); kxcur = (kxcur-1)/2+1; kycur = (kycur-1)/2+1; scalexy = scalexy*2; apserv.inc(ref ntotallayers, _params); } invscalexy = (double)1/(double)scalexy; alglib.ap.assert((kxcur<=maxcoresize+1 && kxcur==basecasex+1) || kxcur%basecasex==1, "Spline2DFit: integrity error"); alglib.ap.assert((kycur<=maxcoresize+1 && kycur==basecasey+1) || kycur%basecasey==1, "Spline2DFit: integrity error"); alglib.ap.assert(kxcur==basecasex+1 || kycur==basecasey+1, "Spline2DFit: integrity error"); // // Initial scaling of dataset. // Store original target values to YRaw. // apserv.rvectorsetlengthatleast(ref yraw, npoints*d, _params); for(i=0; i<=npoints-1; i++) { xy[xew*i+0] = xy[xew*i+0]*invscalexy; xy[xew*i+1] = xy[xew*i+1]*invscalexy; for(j=0; j<=d-1; j++) { yraw[i*d+j] = xy[xew*i+2+j]; } } kxcur = (kx-1)/scalexy+1; kycur = (ky-1)/scalexy+1; // // Build initial dataset index; area is divided into (KXCur-1)*(KYCur-1) // cells, with contiguous storage of points in the same cell. // Iterate over different scales // alglib.smp.ae_shared_pool_set_seed(pool, seed); reorderdatasetandbuildindex(xy, npoints, d, yraw, d, kxcur, kycur, ref xyindex, ref bufi, _params); for(scaleidx=ntotallayers-1; scaleidx>=0; scaleidx--) { if( (nlayers>0 && scaleidx=2 ) { if( tiley1-tiley0>tilex1-tilex0 ) { // // Split problem in Y dimension // // NOTE: recursive calls to FastDDMFitLayer() compute // residuals in the inner cells defined by XYIndex[], // but we still have to compute residuals for cells // BETWEEN two recursive subdivisions of the task. // apserv.tiledsplit(tiley1-tiley0, 1, ref j0, ref j1, _params); fastddmfitlayer(xy, d, scalexy, xyindex, basecasex, tilex0, tilex1, tilescountx, basecasey, tiley0, tiley0+j0, tilescounty, maxcoresize, interfacesize, lsqrcnt, lambdareg, basis1, pool, spline, _params); fastddmfitlayer(xy, d, scalexy, xyindex, basecasex, tilex0, tilex1, tilescountx, basecasey, tiley0+j0, tiley1, tilescounty, maxcoresize, interfacesize, lsqrcnt, lambdareg, basis1, pool, spline, _params); } else { // // Split problem in X dimension // // NOTE: recursive calls to FastDDMFitLayer() compute // residuals in the inner cells defined by XYIndex[], // but we still have to compute residuals for cells // BETWEEN two recursive subdivisions of the task. 
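//
// Added summary of the recursion: the tile range
// [TileX0,TileX1)x[TileY0,TileY1) is bisected along its longer side (via
// apserv.tiledsplit) until a single tile remains; that tile is then solved
// with BlockLLS on a BasecaseX x BasecaseY core extended by InterfaceSize
// nodes of overlap on each side.
//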
// apserv.tiledsplit(tilex1-tilex0, 1, ref j0, ref j1, _params); fastddmfitlayer(xy, d, scalexy, xyindex, basecasex, tilex0, tilex0+j0, tilescountx, basecasey, tiley0, tiley1, tilescounty, maxcoresize, interfacesize, lsqrcnt, lambdareg, basis1, pool, spline, _params); fastddmfitlayer(xy, d, scalexy, xyindex, basecasex, tilex0+j0, tilex1, tilescountx, basecasey, tiley0, tiley1, tilescounty, maxcoresize, interfacesize, lsqrcnt, lambdareg, basis1, pool, spline, _params); } return; } alglib.ap.assert(tiley0==tiley1-1, "Spline2DFit.FastDDMFitLayer: integrity check failed"); alglib.ap.assert(tilex0==tilex1-1, "Spline2DFit.FastDDMFitLayer: integrity check failed"); tile1 = tiley0; tile0 = tilex0; // // Retrieve temporaries // alglib.smp.ae_shared_pool_retrieve(pool, ref buf); // // Analyze dataset // xa = apserv.iboundval(tile0*basecasex-interfacesize, 0, kx, _params); xb = apserv.iboundval((tile0+1)*basecasex+interfacesize, 0, kx, _params); ya = apserv.iboundval(tile1*basecasey-interfacesize, 0, ky, _params); yb = apserv.iboundval((tile1+1)*basecasey+interfacesize, 0, ky, _params); tilesize0 = xb-xa; tilesize1 = yb-ya; // // Solve current chunk with BlockLLS // dummytss = 1.0; xdesigngenerate(xy, xyindex, xa, xb, kx, ya, yb, ky, d, lambdareg, 0.0, basis1, buf.xdesignmatrix, _params); blockllsfit(buf.xdesignmatrix, lsqrcnt, ref buf.tmpz, buf.dummyrep, dummytss, buf.blockllsbuf, _params); buf.localmodel.d = d; buf.localmodel.m = tilesize1; buf.localmodel.n = tilesize0; buf.localmodel.stype = -3; apserv.rvectorsetlengthatleast(ref buf.localmodel.x, tilesize0, _params); apserv.rvectorsetlengthatleast(ref buf.localmodel.y, tilesize1, _params); apserv.rvectorsetlengthatleast(ref buf.localmodel.f, tilesize0*tilesize1*d*4, _params); for(i=0; i<=tilesize0-1; i++) { buf.localmodel.x[i] = xa+i; } for(i=0; i<=tilesize1-1; i++) { buf.localmodel.y[i] = ya+i; } for(i=0; i<=tilesize0*tilesize1*d*4-1; i++) { buf.localmodel.f[i] = 0.0; } updatesplinetable(buf.tmpz, tilesize0, tilesize1, d, basis1, bfrad, buf.localmodel.f, tilesize1, tilesize0, 1, _params); // // Transform local spline to original coordinates // sfx = buf.localmodel.n*buf.localmodel.m*d; sfy = 2*buf.localmodel.n*buf.localmodel.m*d; sfxy = 3*buf.localmodel.n*buf.localmodel.m*d; for(i=0; i<=tilesize0-1; i++) { buf.localmodel.x[i] = buf.localmodel.x[i]*scalexy; } for(i=0; i<=tilesize1-1; i++) { buf.localmodel.y[i] = buf.localmodel.y[i]*scalexy; } for(i=0; i<=tilesize0*tilesize1*d-1; i++) { buf.localmodel.f[sfx+i] = buf.localmodel.f[sfx+i]*invscalexy; buf.localmodel.f[sfy+i] = buf.localmodel.f[sfy+i]*invscalexy; buf.localmodel.f[sfxy+i] = buf.localmodel.f[sfxy+i]*(invscalexy*invscalexy); } // // Output results; for inner and topmost/leftmost tiles we output only BasecaseX*BasecaseY // inner elements; for rightmost/bottom ones we also output one column/row of the interface // part. // // Such complexity is explained by the fact that area size (by design) is not evenly divisible // by the tile size; it is divisible with remainder=1, and we expect that interface size is // at least 1, so we can fill the missing rightmost/bottom elements of Z by the interface // values. 
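// Worked example (illustration only): with BasecaseX=4, ScaleXY=2 and TilesCountX=3
// the fine grid has KX=3*4*2+1=25 nodes; the two leftmost tiles write 8 columns
// each and the rightmost tile writes 8+1=9, so all 25 columns of the output are
// covered exactly once. The same reasoning applies along the Y direction.
//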
// alglib.ap.assert(interfacesize>=1, "Spline2DFit: integrity check failed"); sfx = spline.n*spline.m*d; sfy = 2*spline.n*spline.m*d; sfxy = 3*spline.n*spline.m*d; cnt0 = basecasex*scalexy; cnt1 = basecasey*scalexy; if( tile0==tilescountx-1 ) { apserv.inc(ref cnt0, _params); } if( tile1==tilescounty-1 ) { apserv.inc(ref cnt1, _params); } offs = d*(spline.n*tile1*basecasey*scalexy+tile0*basecasex*scalexy); for(j1=0; j1<=cnt1-1; j1++) { for(j0=0; j0<=cnt0-1; j0++) { for(j=0; j<=d-1; j++) { spline2ddiffvi(buf.localmodel, tile0*basecasex*scalexy+j0, tile1*basecasey*scalexy+j1, j, ref vs, ref vsx, ref vsy, ref vsxy, _params); spline.f[offs+d*(spline.n*j1+j0)+j] = spline.f[offs+d*(spline.n*j1+j0)+j]+vs; spline.f[sfx+offs+d*(spline.n*j1+j0)+j] = spline.f[sfx+offs+d*(spline.n*j1+j0)+j]+vsx; spline.f[sfy+offs+d*(spline.n*j1+j0)+j] = spline.f[sfy+offs+d*(spline.n*j1+j0)+j]+vsy; spline.f[sfxy+offs+d*(spline.n*j1+j0)+j] = spline.f[sfxy+offs+d*(spline.n*j1+j0)+j]+vsxy; } } } // // Recycle temporaries // alglib.smp.ae_shared_pool_recycle(pool, ref buf); } /************************************************************************* Serial stub for GPL edition. *************************************************************************/ public static bool _trypexec_fastddmfitlayer(double[] xy, int d, int scalexy, int[] xyindex, int basecasex, int tilex0, int tilex1, int tilescountx, int basecasey, int tiley0, int tiley1, int tilescounty, int maxcoresize, int interfacesize, int lsqrcnt, double lambdareg, spline1d.spline1dinterpolant basis1, alglib.smp.shared_pool pool, spline2dinterpolant spline, alglib.xparams _params) { return false; } /************************************************************************* This function performs fitting with BlockLLS solver. Internal function, never use it directly. IMPORTANT: performance and memory requirements of this function are asymmetric w.r.t. KX and KY: it has * O(KY*KX^2) memory requirements * O(KY*KX^3) running time Thus, if you have large KY and small KX, simple transposition of your dataset may give you great speedup. INPUT PARAMETERS: AV - sparse matrix, [ARows,KX*KY] in size. "Vertical" version of design matrix, rows [0,NPoints) contain values of basis functions at dataset points. Other rows are used for nonlinearity penalty and other stuff like that. AH - transpose(AV), "horizontal" version of AV ARows - rows count XY - array[NPoints*(2+D)], dataset KX, KY - grid size NPoints - points count D - number of components in vector-valued spline, D>=1 LSQRCnt - number of iterations, non-zero: * LSQRCnt>0 means that specified amount of preconditioned LSQR iterations will be performed to solve problem; usually we need 2..5 its. Recommended option - best convergence and stability/quality. * LSQRCnt<0 means that instead of LSQR we use iterative refinement on normal equations. Again, 2..5 its is enough. Z - possibly preallocated buffer for solution Rep - report structure; fields which are not set by this function are left intact TSS - total sum of squares; used to calculate R2 OUTPUT PARAMETERS: XY - destroyed in process Z - array[KX*KY*D], filled by solution; KX*KY coefficients corresponding to each of D dimensions are stored contiguously. 
Rep - following fields are set: * Rep.RMSError * Rep.AvgError * Rep.MaxError * Rep.R2 -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ private static void blockllsfit(spline2dxdesignmatrix xdesign, int lsqrcnt, ref double[] z, spline2dfitreport rep, double tss, spline2dblockllsbuf buf, alglib.xparams _params) { int blockbandwidth = 0; int d = 0; int i = 0; int j = 0; double lambdachol = 0; apserv.sreal mxata = new apserv.sreal(); double v = 0; int celloffset = 0; int i0 = 0; int i1 = 0; double rss = 0; int arows = 0; int bw2 = 0; int kx = 0; int ky = 0; alglib.ap.assert(xdesign.blockwidth==4, "Spline2DFit: integrity check failed"); blockbandwidth = 3; d = xdesign.d; arows = xdesign.nrows; kx = xdesign.kx; ky = xdesign.ky; bw2 = xdesign.blockwidth*xdesign.blockwidth; // // Initial values for Z/Residuals // apserv.rvectorsetlengthatleast(ref z, kx*ky*d, _params); for(i=0; i<=kx*ky*d-1; i++) { z[i] = 0; } // // Create and factorize design matrix. Add regularizer if // factorization failed (happens sometimes with zero // smoothing and sparsely populated datasets). // // The algorithm below is refactoring of NaiveLLS algorithm, // which uses sparsity properties and compressed block storage. // // Problem sparsity pattern results in block-band-diagonal // matrix (block matrix with limited bandwidth, equal to 3 // for bicubic splines). Thus, we have KY*KY blocks, each // of them is KX*KX in size. Design matrix is stored in // large NROWS*KX matrix, with NROWS=(BlockBandwidth+1)*KY*KX. // // We use adaptation of block skyline storage format, with // TOWERSIZE*KX skyline bands (towers) stored sequentially; // here TOWERSIZE=(BlockBandwidth+1)*KX. So, we have KY // "towers", stored one below other, in BlockATA matrix. // Every "tower" is a sequence of BlockBandwidth+1 cells, // each of them being KX*KX in size. // lambdachol = cholreg; apserv.rmatrixsetlengthatleast(ref buf.blockata, (blockbandwidth+1)*ky*kx, kx, _params); while( true ) { // // Parallel generation of squared design matrix. // xdesignblockata(xdesign, buf.blockata, ref mxata.val, _params); // // Regularization // v = apserv.coalesce(mxata.val, 1.0, _params)*lambdachol; for(i1=0; i1<=ky-1; i1++) { celloffset = getcelloffset(kx, ky, blockbandwidth, i1, i1, _params); for(i0=0; i0<=kx-1; i0++) { buf.blockata[celloffset+i0,i0] = buf.blockata[celloffset+i0,i0]+v; } } // // Try Cholesky factorization. // if( !blockllscholesky(buf.blockata, kx, ky, ref buf.trsmbuf2, ref buf.cholbuf2, ref buf.cholbuf1, _params) ) { // // Factorization failed, increase regularizer and repeat // lambdachol = apserv.coalesce(10*lambdachol, 1.0E-12, _params); continue; } break; } // // Solve // rss = 0.0; rep.rmserror = 0; rep.avgerror = 0; rep.maxerror = 0; alglib.ap.assert(lsqrcnt>0, "Spline2DFit: integrity failure"); apserv.rvectorsetlengthatleast(ref buf.tmp0, arows, _params); apserv.rvectorsetlengthatleast(ref buf.tmp1, kx*ky, _params); linlsqr.linlsqrcreatebuf(arows, kx*ky, buf.solver, _params); for(j=0; j<=d-1; j++) { // // Preconditioned LSQR: // // use Cholesky factor U of squared design matrix A'*A to // transform min|A*x-b| to min|[A*inv(U)]*y-b| with y=U*x. // // Preconditioned problem is solved with LSQR solver, which // gives superior results than normal equations. 
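// In other words: if A'*A = U'*U is the Cholesky factorization computed above,
// the preconditioned operator B = A*inv(U) satisfies B'*B = inv(U')*(A'*A)*inv(U) = I
// (up to the small regularizer added before factorization), so LSQR applied to
// min|B*y-b| needs only a handful of iterations - consistent with the 2..5
// iterations recommended in the comments above.
//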
// for(i=0; i<=arows-1; i++) { if( i=1 LSQRCnt - number of iterations, non-zero: * LSQRCnt>0 means that specified amount of preconditioned LSQR iterations will be performed to solve problem; usually we need 2..5 its. Recommended option - best convergence and stability/quality. * LSQRCnt<0 means that instead of LSQR we use iterative refinement on normal equations. Again, 2..5 its is enough. Z - possibly preallocated buffer for solution Rep - report structure; fields which are not set by this function are left intact TSS - total sum of squares; used to calculate R2 OUTPUT PARAMETERS: XY - destroyed in process Z - array[KX*KY*D], filled by solution; KX*KY coefficients corresponding to each of D dimensions are stored contiguously. Rep - following fields are set: * Rep.RMSError * Rep.AvgError * Rep.MaxError * Rep.R2 -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ private static void naivellsfit(sparse.sparsematrix av, sparse.sparsematrix ah, int arows, double[] xy, int kx, int ky, int npoints, int d, int lsqrcnt, ref double[] z, spline2dfitreport rep, double tss, alglib.xparams _params) { int ew = 0; int i = 0; int j = 0; int i0 = 0; int i1 = 0; int j0 = 0; int j1 = 0; double v = 0; int blockbandwidth = 0; double lambdareg = 0; int srci = 0; int srcj = 0; int idxi = 0; int idxj = 0; int endi = 0; int endj = 0; int rfsidx = 0; double[,] ata = new double[0,0]; double[] tmp0 = new double[0]; double[] tmp1 = new double[0]; double mxata = 0; linlsqr.linlsqrstate solver = new linlsqr.linlsqrstate(); linlsqr.linlsqrreport solverrep = new linlsqr.linlsqrreport(); double rss = 0; blockbandwidth = 3; ew = 2+d; // // Initial values for Z/Residuals // apserv.rvectorsetlengthatleast(ref z, kx*ky*d, _params); for(i=0; i<=kx*ky*d-1; i++) { z[i] = 0; } // // Create and factorize design matrix. // // Add regularizer if factorization failed (happens sometimes // with zero smoothing and sparsely populated datasets). // lambdareg = cholreg; apserv.rmatrixsetlengthatleast(ref ata, kx*ky, kx*ky, _params); while( true ) { mxata = 0.0; for(i=0; i<=kx*ky-1; i++) { for(j=i; j<=kx*ky-1; j++) { // // Initialize by zero // ata[i,j] = 0; // // Determine grid nodes corresponding to I and J; // skip if too far away // i0 = i%kx; i1 = i/kx; j0 = j%kx; j1 = j/kx; if( Math.Abs(i0-j0)>blockbandwidth || Math.Abs(i1-j1)>blockbandwidth ) { continue; } // // Nodes are close enough, calculate product of columns I and J of A. // v = 0; srci = ah.ridx[i]; srcj = ah.ridx[j]; endi = ah.ridx[i+1]; endj = ah.ridx[j+1]; while( true ) { if( srci>=endi || srcj>=endj ) { break; } idxi = ah.idx[srci]; idxj = ah.idx[srcj]; if( idxi==idxj ) { v = v+ah.vals[srci]*ah.vals[srcj]; srci = srci+1; srcj = srcj+1; continue; } if( idxi0 ) { linlsqr.linlsqrcreate(arows, kx*ky, solver, _params); } for(j=0; j<=d-1; j++) { alglib.ap.assert(lsqrcnt!=0, "Spline2DFit: integrity failure"); if( lsqrcnt>0 ) { // // Preconditioned LSQR: // // use Cholesky factor U of squared design matrix A'*A to // transform min|A*x-b| to min|[A*inv(U)]*y-b| with y=U*x. // // Preconditioned problem is solved with LSQR solver, which // gives superior results than normal equations. 
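// Note on the remark above: forming plain normal equations means working with
// A'*A, whose condition number is roughly the square of cond(A); running LSQR
// on the Cholesky-preconditioned operator keeps the effective conditioning
// close to 1, which is why it typically produces more accurate solutions.
//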
// linlsqr.linlsqrcreate(arows, kx*ky, solver, _params); for(i=0; i<=arows-1; i++) { if( i=0 && i=0 && j=i && j<=i+blockbandwidth, "Spline2DFit: GetCellOffset() integrity error"); result = j*(blockbandwidth+1)*kx; result = result+(blockbandwidth-(j-i))*kx; return result; } /************************************************************************* This is convenience function for band block storage format; it copies cell (I,J) from compressed format to uncompressed general matrix, at desired position. -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ private static void copycellto(int kx, int ky, int blockbandwidth, double[,] blockata, int i, int j, double[,] dst, int dst0, int dst1, alglib.xparams _params) { int celloffset = 0; int idx0 = 0; int idx1 = 0; celloffset = getcelloffset(kx, ky, blockbandwidth, i, j, _params); for(idx0=0; idx0<=kx-1; idx0++) { for(idx1=0; idx1<=kx-1; idx1++) { dst[dst0+idx0,dst1+idx1] = blockata[celloffset+idx0,idx1]; } } } /************************************************************************* This is convenience function for band block storage format; it truncates all elements of cell (I,J) which are less than Eps in magnitude. -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ private static void flushtozerocell(int kx, int ky, int blockbandwidth, double[,] blockata, int i, int j, double eps, alglib.xparams _params) { int celloffset = 0; int idx0 = 0; int idx1 = 0; double eps2 = 0; double v = 0; celloffset = getcelloffset(kx, ky, blockbandwidth, i, j, _params); eps2 = eps*eps; for(idx0=0; idx0<=kx-1; idx0++) { for(idx1=0; idx1<=kx-1; idx1++) { v = blockata[celloffset+idx0,idx1]; if( v*v=(double)(0), "BlockLLSGenerateATA: integrity check failed"); blockbandwidth = 3; // // Determine problem cost, perform recursive subdivision // (with optional parallelization) // avgrowlen = (double)ah.ridx[kx*ky]/(double)(kx*ky); cellcost = apserv.rmul3(kx, 1+2*blockbandwidth, avgrowlen, _params); totalcost = apserv.rmul3(ky1-ky0, 1+2*blockbandwidth, cellcost, _params); if( ky1-ky0>=2 && (double)(totalcost)>(double)(apserv.smpactivationlevel(_params)) ) { if( _trypexec_blockllsgenerateata(ah,ky0,ky1,kx,ky,blockata,mxata, _params) ) { return; } } if( ky1-ky0>=2 ) { // // Split X: X*A = (X1 X2)^T*A // j = (ky1-ky0)/2; blockllsgenerateata(ah, ky0, ky0+j, kx, ky, blockata, tmpmxata, _params); blockllsgenerateata(ah, ky0+j, ky1, kx, ky, blockata, mxata, _params); mxata.val = Math.Max(mxata.val, tmpmxata.val); return; } // // Splitting in Y-dimension is done, fill I1-th "tower" // alglib.ap.assert(ky1==ky0+1, "BlockLLSGenerateATA: integrity check failed"); i1 = ky0; for(j1=i1; j1<=Math.Min(ky-1, i1+blockbandwidth); j1++) { celloffset = getcelloffset(kx, ky, blockbandwidth, i1, j1, _params); // // Clear cell (I1,J1) // for(i0=0; i0<=kx-1; i0++) { for(j0=0; j0<=kx-1; j0++) { blockata[celloffset+i0,j0] = 0.0; } } // // Initialize cell internals // for(i0=0; i0<=kx-1; i0++) { for(j0=0; j0<=kx-1; j0++) { if( Math.Abs(i0-j0)<=blockbandwidth ) { // // Nodes are close enough, calculate product of columns I and J of A. 
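// The sparse dot product below computes (A'*A)[i,j] = sum_k A[k,i]*A[k,j] by
// merging the nonzero lists of rows I and J of AH (AH stores the columns of the
// design matrix in CRS format): SrcI/SrcJ sweep both lists, the pointer with the
// smaller index is advanced, and V is accumulated whenever the indices match.
// The result is nonzero only for node pairs with |I0-J0|<=BlockBandwidth and
// |I1-J1|<=BlockBandwidth, which is what produces the block-band structure of A'*A.
//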
// v = 0; i = i1*kx+i0; j = j1*kx+j0; srci = ah.ridx[i]; srcj = ah.ridx[j]; endi = ah.ridx[i+1]; endj = ah.ridx[j+1]; while( true ) { if( srci>=endi || srcj>=endj ) { break; } idxi = ah.idx[srci]; idxj = ah.idx[srcj]; if( idxi==idxj ) { v = v+ah.vals[srci]*ah.vals[srcj]; srci = srci+1; srcj = srcj+1; continue; } if( idxi=0; blockidx--) { for(blockidx1=1; blockidx1<=Math.Min(ky-(blockidx+1), blockbandwidth); blockidx1++) { celloffset = getcelloffset(kx, ky, blockbandwidth, blockidx, blockidx+blockidx1, _params); ablas.rmatrixgemv(kx, kx, -1.0, blockata, celloffset, 0, 0, b, (blockidx+blockidx1)*kx, 1.0, b, blockidx*kx, _params); } celloffset = getcelloffset(kx, ky, blockbandwidth, blockidx, blockidx, _params); ablas.rmatrixtrsv(kx, blockata, celloffset, 0, true, false, 0, b, blockidx*kx, _params); } } else { // // Solve U'*x=b // for(blockidx=0; blockidx<=ky-1; blockidx++) { celloffset = getcelloffset(kx, ky, blockbandwidth, blockidx, blockidx, _params); ablas.rmatrixtrsv(kx, blockata, celloffset, 0, true, false, 1, b, blockidx*kx, _params); for(blockidx1=1; blockidx1<=Math.Min(ky-(blockidx+1), blockbandwidth); blockidx1++) { celloffset = getcelloffset(kx, ky, blockbandwidth, blockidx, blockidx+blockidx1, _params); ablas.rmatrixgemv(kx, kx, -1.0, blockata, celloffset, 0, 1, b, blockidx*kx, 1.0, b, (blockidx+blockidx1)*kx, _params); } } } } /************************************************************************* This function computes residuals for dataset XY[], using array of original values YRaw[], and loads residuals to XY. Processing is performed in parallel manner. -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ private static void computeresidualsfromscratch(double[] xy, double[] yraw, int npoints, int d, int scalexy, spline2dinterpolant spline, alglib.xparams _params) { apserv.srealarray seed = new apserv.srealarray(); alglib.smp.shared_pool pool = new alglib.smp.shared_pool(); int chunksize = 0; double pointcost = 0; // // Setting up // chunksize = 1000; pointcost = 100.0; if( (double)(npoints*pointcost)>(double)(apserv.smpactivationlevel(_params)) ) { if( _trypexec_computeresidualsfromscratch(xy,yraw,npoints,d,scalexy,spline, _params) ) { return; } } alglib.smp.ae_shared_pool_set_seed(pool, seed); // // Call compute workhorse // computeresidualsfromscratchrec(xy, yraw, 0, npoints, chunksize, d, scalexy, spline, pool, _params); } /************************************************************************* Serial stub for GPL edition. *************************************************************************/ public static bool _trypexec_computeresidualsfromscratch(double[] xy, double[] yraw, int npoints, int d, int scalexy, spline2dinterpolant spline, alglib.xparams _params) { return false; } /************************************************************************* Recursive workhorse for ComputeResidualsFromScratch. 
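
The workhorse recursively splits the point range [Pt0,Pt1) into chunks of at
most ChunkSize points (1000 in the caller above), retrieves a per-thread buffer
from the shared pool, evaluates the spline at the rescaled coordinates X*ScaleXY
and stores Residual = YRaw - S(X) back into the dataset, component by component.
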
  -- ALGLIB --
     Copyright 05.02.2018 by Bochkanov Sergey
*************************************************************************/
private static void computeresidualsfromscratchrec(double[] xy,
    double[] yraw,
    int pt0,
    int pt1,
    int chunksize,
    int d,
    int scalexy,
    spline2dinterpolant spline,
    alglib.smp.shared_pool pool,
    alglib.xparams _params)
{
    int i = 0;
    int j = 0;
    apserv.srealarray pbuf = null;
    int xew = 0;

    xew = 2+d;

    //
    // Parallelism
    //
    if( pt1-pt0>chunksize )
    {
        apserv.tiledsplit(pt1-pt0, chunksize, ref i, ref j, _params);
        computeresidualsfromscratchrec(xy, yraw, pt0, pt0+i, chunksize, d, scalexy, spline, pool, _params);
        computeresidualsfromscratchrec(xy, yraw, pt0+i, pt1, chunksize, d, scalexy, spline, pool, _params);
        return;
    }

    //
    // Serial execution
    //
    alglib.smp.ae_shared_pool_retrieve(pool, ref pbuf);
    for(i=pt0; i<=pt1-1; i++)
    {
        spline2dcalcvbuf(spline, xy[i*xew+0]*scalexy, xy[i*xew+1]*scalexy, ref pbuf.val, _params);
        for(j=0; j<=d-1; j++)
        {
            xy[i*xew+2+j] = yraw[i*d+j]-pbuf.val[j];
        }
    }
    alglib.smp.ae_shared_pool_recycle(pool, ref pbuf);
}


/*************************************************************************
Serial stub for GPL edition.
*************************************************************************/
public static bool _trypexec_computeresidualsfromscratchrec(double[] xy,
    double[] yraw,
    int pt0,
    int pt1,
    int chunksize,
    int d,
    int scalexy,
    spline2dinterpolant spline,
    alglib.smp.shared_pool pool,
    alglib.xparams _params)
{
    return false;
}


/*************************************************************************
This function reorders dataset and builds index:
* it is assumed that all points have X in [0,KX-1], Y in [0,KY-1]
* area is divided into (KX-1)*(KY-1) cells
* all points are reordered in such way that points in same cell are stored
  contiguously
* dataset index, array[(KX-1)*(KY-1)+1], is generated. Points of cell I now
  have indexes XYIndex[I]..XYIndex[I+1]-1;

INPUT PARAMETERS:
    XY          -   array[NPoints*(2+D)], dataset
    KX, KY, D   -   grid size and dimensionality of the outputs
    Shadow      -   shadow array[NPoints*NS], which is sorted together with
                    XY; if NS=0, it is not referenced at all.
    NS          -   entry width of shadow array
    BufI        -   possibly preallocated temporary buffer; resized if needed.
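
NOTE (illustration): a point with X=2.7, Y=0.3 on a KX=5, KY=4 grid gets
      I0=floor(X)=2, I1=floor(Y)=0 (both clamped to the valid cell range),
      i.e. cell index I1*(KX-1)+I0 = 0*4+2 = 2; after reordering, its entry
      occupies one of the rows XYIndex[2]..XYIndex[3]-1.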
OUTPUT PARAMETERS: XY - reordered XYIndex - array[(KX-1)*(KY-1)+1], dataset index -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ private static void reorderdatasetandbuildindex(double[] xy, int npoints, int d, double[] shadow, int ns, int kx, int ky, ref int[] xyindex, ref int[] bufi, alglib.xparams _params) { int i = 0; int i0 = 0; int i1 = 0; int entrywidth = 0; // // Set up // alglib.ap.assert(kx>=2, "Spline2DFit.ReorderDatasetAndBuildIndex: integrity check failed"); alglib.ap.assert(ky>=2, "Spline2DFit.ReorderDatasetAndBuildIndex: integrity check failed"); entrywidth = 2+d; apserv.ivectorsetlengthatleast(ref xyindex, (kx-1)*(ky-1)+1, _params); apserv.ivectorsetlengthatleast(ref bufi, npoints, _params); for(i=0; i<=npoints-1; i++) { i0 = apserv.iboundval((int)Math.Floor(xy[i*entrywidth+0]), 0, kx-2, _params); i1 = apserv.iboundval((int)Math.Floor(xy[i*entrywidth+1]), 0, ky-2, _params); bufi[i] = i1*(kx-1)+i0; } // // Reorder // reorderdatasetandbuildindexrec(xy, d, shadow, ns, bufi, 0, npoints, xyindex, 0, (kx-1)*(ky-1), true, _params); xyindex[(kx-1)*(ky-1)] = npoints; } /************************************************************************* This function multiplies all points in dataset by 2.0 and rebuilds index, given previous index built for KX_prev=(KX-1)/2 and KY_prev=(KY-1)/2 INPUT PARAMETERS: XY - array[NPoints*(2+D)], dataset BEFORE scaling NPoints, D - dataset size and dimensionality of the outputs Shadow - shadow array[NPoints*NS], which is sorted together with XY; if NS=0, it is not referenced at all. NS - entry width of shadow array KX, KY - new grid dimensionality XYIndex - index built for previous values of KX and KY BufI - possibly preallocated temporary buffer; resized if needed. 
OUTPUT PARAMETERS: XY - reordered and multiplied by 2.0 XYIndex - array[(KX-1)*(KY-1)+1], dataset index -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ private static void rescaledatasetandrefineindex(double[] xy, int npoints, int d, double[] shadow, int ns, int kx, int ky, ref int[] xyindex, ref int[] bufi, alglib.xparams _params) { int[] xyindexprev = new int[0]; // // Set up // alglib.ap.assert(kx>=2, "Spline2DFit.RescaleDataset2AndRefineIndex: integrity check failed"); alglib.ap.assert(ky>=2, "Spline2DFit.RescaleDataset2AndRefineIndex: integrity check failed"); alglib.ap.assert((kx-1)%2==0, "Spline2DFit.RescaleDataset2AndRefineIndex: integrity check failed"); alglib.ap.assert((ky-1)%2==0, "Spline2DFit.RescaleDataset2AndRefineIndex: integrity check failed"); alglib.ap.swap(ref xyindex, ref xyindexprev); apserv.ivectorsetlengthatleast(ref xyindex, (kx-1)*(ky-1)+1, _params); apserv.ivectorsetlengthatleast(ref bufi, npoints, _params); // // Refine // expandindexrows(xy, d, shadow, ns, bufi, 0, npoints, xyindexprev, 0, (ky+1)/2-1, xyindex, kx, ky, true, _params); xyindex[(kx-1)*(ky-1)] = npoints; // // Integrity check // } /************************************************************************* Recurrent divide-and-conquer indexing function -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ private static void expandindexrows(double[] xy, int d, double[] shadow, int ns, int[] cidx, int pt0, int pt1, int[] xyindexprev, int row0, int row1, int[] xyindexnew, int kxnew, int kynew, bool rootcall, alglib.xparams _params) { int i = 0; int entrywidth = 0; int kxprev = 0; double v = 0; int i0 = 0; int i1 = 0; double efficiency = 0; double cost = 0; int rowmid = 0; kxprev = (kxnew+1)/2; entrywidth = 2+d; efficiency = 0.1; cost = d*(pt1-pt0+1)*(Math.Log(kxnew)/Math.Log(2))/efficiency; alglib.ap.assert(xyindexprev[row0*(kxprev-1)+0]==pt0, "Spline2DFit.ExpandIndexRows: integrity check failed"); alglib.ap.assert(xyindexprev[row1*(kxprev-1)+0]==pt1, "Spline2DFit.ExpandIndexRows: integrity check failed"); // // Parallelism // if( ((rootcall && pt1-pt0>10000) && row1-row0>=2) && (double)(cost)>(double)(apserv.smpactivationlevel(_params)) ) { if( _trypexec_expandindexrows(xy,d,shadow,ns,cidx,pt0,pt1,xyindexprev,row0,row1,xyindexnew,kxnew,kynew,rootcall, _params) ) { return; } } // // Partition // if( row1-row0>=2 ) { apserv.tiledsplit(row1-row0, 1, ref i0, ref i1, _params); rowmid = row0+i0; expandindexrows(xy, d, shadow, ns, cidx, pt0, xyindexprev[rowmid*(kxprev-1)+0], xyindexprev, row0, rowmid, xyindexnew, kxnew, kynew, false, _params); expandindexrows(xy, d, shadow, ns, cidx, xyindexprev[rowmid*(kxprev-1)+0], pt1, xyindexprev, rowmid, row1, xyindexnew, kxnew, kynew, false, _params); return; } // // Serial execution // for(i=pt0; i<=pt1-1; i++) { v = 2*xy[i*entrywidth+0]; xy[i*entrywidth+0] = v; i0 = apserv.iboundval((int)Math.Floor(v), 0, kxnew-2, _params); v = 2*xy[i*entrywidth+1]; xy[i*entrywidth+1] = v; i1 = apserv.iboundval((int)Math.Floor(v), 0, kynew-2, _params); cidx[i] = i1*(kxnew-1)+i0; } reorderdatasetandbuildindexrec(xy, d, shadow, ns, cidx, pt0, pt1, xyindexnew, 2*row0*(kxnew-1)+0, 2*row1*(kxnew-1)+0, false, _params); } /************************************************************************* Serial stub for GPL edition. 
*************************************************************************/ public static bool _trypexec_expandindexrows(double[] xy, int d, double[] shadow, int ns, int[] cidx, int pt0, int pt1, int[] xyindexprev, int row0, int row1, int[] xyindexnew, int kxnew, int kynew, bool rootcall, alglib.xparams _params) { return false; } /************************************************************************* Recurrent divide-and-conquer indexing function -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ private static void reorderdatasetandbuildindexrec(double[] xy, int d, double[] shadow, int ns, int[] cidx, int pt0, int pt1, int[] xyindex, int idx0, int idx1, bool rootcall, alglib.xparams _params) { int entrywidth = 0; int idxmid = 0; int wrk0 = 0; int wrk1 = 0; double efficiency = 0; double cost = 0; // // Efficiency - performance of the code when compared with that // of linear algebra code. // entrywidth = 2+d; efficiency = 0.1; cost = d*(pt1-pt0+1)*Math.Log(idx1-idx0+1)/Math.Log(2)/efficiency; // // Parallelism // if( ((rootcall && pt1-pt0>10000) && idx1-idx0>=2) && (double)(cost)>(double)(apserv.smpactivationlevel(_params)) ) { if( _trypexec_reorderdatasetandbuildindexrec(xy,d,shadow,ns,cidx,pt0,pt1,xyindex,idx0,idx1,rootcall, _params) ) { return; } } // // Store left bound to XYIndex // xyindex[idx0] = pt0; // // Quick exit strategies // if( idx1<=idx0+1 ) { return; } if( pt0==pt1 ) { for(idxmid=idx0+1; idxmid<=idx1-1; idxmid++) { xyindex[idxmid] = pt1; } return; } // // Select middle element // idxmid = idx0+(idx1-idx0)/2; alglib.ap.assert(idx0=pt0 && cidx[wrk1]>=idxmid ) { wrk1 = wrk1-1; } if( wrk1<=wrk0 ) { break; } apserv.swapentries(xy, wrk0, wrk1, entrywidth, _params); if( ns>0 ) { apserv.swapentries(shadow, wrk0, wrk1, ns, _params); } apserv.swapelementsi(cidx, wrk0, wrk1, _params); } reorderdatasetandbuildindexrec(xy, d, shadow, ns, cidx, pt0, wrk0, xyindex, idx0, idxmid, false, _params); reorderdatasetandbuildindexrec(xy, d, shadow, ns, cidx, wrk0, pt1, xyindex, idxmid, idx1, false, _params); } /************************************************************************* Serial stub for GPL edition. *************************************************************************/ public static bool _trypexec_reorderdatasetandbuildindexrec(double[] xy, int d, double[] shadow, int ns, int[] cidx, int pt0, int pt1, int[] xyindex, int idx0, int idx1, bool rootcall, alglib.xparams _params) { return false; } /************************************************************************* This function performs fitting with BlockLLS solver. Internal function, never use it directly. 
INPUT PARAMETERS: XY - dataset, array[NPoints,2+D] XYIndex - dataset index, see ReorderDatasetAndBuildIndex() for more info KX0, KX1- X-indices of basis functions to select and fit; range [KX0,KX1) is processed KXTotal - total number of indexes in the entire grid KY0, KY1- Y-indices of basis functions to select and fit; range [KY0,KY1) is processed KYTotal - total number of indexes in the entire grid D - number of components in vector-valued spline, D>=1 LambdaReg- regularization coefficient LambdaNS- nonlinearity penalty, exactly zero value is specially handled (entire set of rows is not added to the matrix) Basis1 - single-dimensional B-spline OUTPUT PARAMETERS: A - design matrix -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ private static void xdesigngenerate(double[] xy, int[] xyindex, int kx0, int kx1, int kxtotal, int ky0, int ky1, int kytotal, int d, double lambdareg, double lambdans, spline1d.spline1dinterpolant basis1, spline2dxdesignmatrix a, alglib.xparams _params) { int entrywidth = 0; int i = 0; int j = 0; int j0 = 0; int j1 = 0; int k0 = 0; int k1 = 0; int kx = 0; int ky = 0; int rowsdone = 0; int batchesdone = 0; int pt0 = 0; int pt1 = 0; int base0 = 0; int base1 = 0; int baseidx = 0; int nzshift = 0; int nzwidth = 0; double[,] d2x = new double[0,0]; double[,] d2y = new double[0,0]; double[,] dxy = new double[0,0]; double v = 0; double v0 = 0; double v1 = 0; double v2 = 0; double w0 = 0; double w1 = 0; double w2 = 0; nzshift = 1; nzwidth = 4; entrywidth = 2+d; kx = kx1-kx0; ky = ky1-ky0; a.lambdareg = lambdareg; a.blockwidth = 4; a.kx = kx; a.ky = ky; a.d = d; a.npoints = 0; a.ndenserows = 0; a.ndensebatches = 0; a.maxbatch = 0; for(j1=ky0; j1<=ky1-2; j1++) { for(j0=kx0; j0<=kx1-2; j0++) { i = xyindex[j1*(kxtotal-1)+j0+1]-xyindex[j1*(kxtotal-1)+j0]; a.npoints = a.npoints+i; a.ndenserows = a.ndenserows+i; a.ndensebatches = a.ndensebatches+1; a.maxbatch = Math.Max(a.maxbatch, i); } } if( (double)(lambdans)!=(double)(0) ) { alglib.ap.assert((double)(lambdans)>=(double)(0), "Spline2DFit: integrity check failed"); a.ndenserows = a.ndenserows+3*(kx-2)*(ky-2); a.ndensebatches = a.ndensebatches+(kx-2)*(ky-2); a.maxbatch = Math.Max(a.maxbatch, 3); } a.nrows = a.ndenserows+kx*ky; apserv.rmatrixsetlengthatleast(ref a.vals, a.ndenserows, a.blockwidth*a.blockwidth+d, _params); apserv.ivectorsetlengthatleast(ref a.batches, a.ndensebatches+1, _params); apserv.ivectorsetlengthatleast(ref a.batchbases, a.ndensebatches, _params); // // Setup output counters // batchesdone = 0; rowsdone = 0; // // Generate rows corresponding to dataset points // alglib.ap.assert(kx>=nzwidth, "Spline2DFit: integrity check failed"); alglib.ap.assert(ky>=nzwidth, "Spline2DFit: integrity check failed"); apserv.rvectorsetlengthatleast(ref a.tmp0, nzwidth, _params); apserv.rvectorsetlengthatleast(ref a.tmp1, nzwidth, _params); a.batches[batchesdone] = 0; for(j1=ky0; j1<=ky1-2; j1++) { for(j0=kx0; j0<=kx1-2; j0++) { pt0 = xyindex[j1*(kxtotal-1)+j0]; pt1 = xyindex[j1*(kxtotal-1)+j0+1]; base0 = apserv.iboundval(j0-kx0-nzshift, 0, kx-nzwidth, _params); base1 = apserv.iboundval(j1-ky0-nzshift, 0, ky-nzwidth, _params); baseidx = base1*kx+base0; a.batchbases[batchesdone] = baseidx; for(i=pt0; i<=pt1-1; i++) { for(k0=0; k0<=nzwidth-1; k0++) { a.tmp0[k0] = spline1d.spline1dcalc(basis1, xy[i*entrywidth+0]-(base0+kx0+k0), _params); } for(k1=0; k1<=nzwidth-1; k1++) { a.tmp1[k1] = spline1d.spline1dcalc(basis1, xy[i*entrywidth+1]-(base1+ky0+k1), 
_params); } for(k1=0; k1<=nzwidth-1; k1++) { for(k0=0; k0<=nzwidth-1; k0++) { a.vals[rowsdone,k1*nzwidth+k0] = a.tmp0[k0]*a.tmp1[k1]; } } for(j=0; j<=d-1; j++) { a.vals[rowsdone,nzwidth*nzwidth+j] = xy[i*entrywidth+2+j]; } rowsdone = rowsdone+1; } batchesdone = batchesdone+1; a.batches[batchesdone] = rowsdone; } } // // Generate rows corresponding to nonlinearity penalty // if( (double)(lambdans)>(double)(0) ) { // // Smoothing is applied. Because all grid nodes are same, // we apply same smoothing kernel, which is calculated only // once at the beginning of design matrix generation. // d2x = new double[3, 3]; d2y = new double[3, 3]; dxy = new double[3, 3]; for(j1=0; j1<=2; j1++) { for(j0=0; j0<=2; j0++) { d2x[j0,j1] = 0.0; d2y[j0,j1] = 0.0; dxy[j0,j1] = 0.0; } } for(k1=0; k1<=2; k1++) { for(k0=0; k0<=2; k0++) { spline1d.spline1ddiff(basis1, -(k0-1), ref v0, ref v1, ref v2, _params); spline1d.spline1ddiff(basis1, -(k1-1), ref w0, ref w1, ref w2, _params); d2x[k0,k1] = d2x[k0,k1]+v2*w0; d2y[k0,k1] = d2y[k0,k1]+w2*v0; dxy[k0,k1] = dxy[k0,k1]+v1*w1; } } // // Now, kernel is ready - apply it to all inner nodes of the grid. // for(j1=1; j1<=ky-2; j1++) { for(j0=1; j0<=kx-2; j0++) { base0 = apserv.imax2(j0-2, 0, _params); base1 = apserv.imax2(j1-2, 0, _params); baseidx = base1*kx+base0; a.batchbases[batchesdone] = baseidx; // // d2F/dx2 term // v = lambdans; for(j=0; j<=nzwidth*nzwidth+d-1; j++) { a.vals[rowsdone,j] = 0; } for(k1=j1-1; k1<=j1+1; k1++) { for(k0=j0-1; k0<=j0+1; k0++) { a.vals[rowsdone,nzwidth*(k1-base1)+(k0-base0)] = v*d2x[1+(k0-j0),1+(k1-j1)]; } } rowsdone = rowsdone+1; // // d2F/dy2 term // v = lambdans; for(j=0; j<=nzwidth*nzwidth+d-1; j++) { a.vals[rowsdone,j] = 0; } for(k1=j1-1; k1<=j1+1; k1++) { for(k0=j0-1; k0<=j0+1; k0++) { a.vals[rowsdone,nzwidth*(k1-base1)+(k0-base0)] = v*d2y[1+(k0-j0),1+(k1-j1)]; } } rowsdone = rowsdone+1; // // 2*d2F/dxdy term // v = Math.Sqrt(2)*lambdans; for(j=0; j<=nzwidth*nzwidth+d-1; j++) { a.vals[rowsdone,j] = 0; } for(k1=j1-1; k1<=j1+1; k1++) { for(k0=j0-1; k0<=j0+1; k0++) { a.vals[rowsdone,nzwidth*(k1-base1)+(k0-base0)] = v*dxy[1+(k0-j0),1+(k1-j1)]; } } rowsdone = rowsdone+1; batchesdone = batchesdone+1; a.batches[batchesdone] = rowsdone; } } } // // Integrity post-check // alglib.ap.assert(batchesdone==a.ndensebatches, "Spline2DFit: integrity check failed"); alglib.ap.assert(rowsdone==a.ndenserows, "Spline2DFit: integrity check failed"); } /************************************************************************* This function performs matrix-vector product of design matrix and dense vector. INPUT PARAMETERS: A - design matrix, (a.nrows) X (a.kx*a.ky); some fields of A are used for temporaries, so it is non-constant. 
X - array[A.KX*A.KY] OUTPUT PARAMETERS: Y - product, array[A.NRows], automatically allocated -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ private static void xdesignmv(spline2dxdesignmatrix a, double[] x, ref double[] y, alglib.xparams _params) { int bidx = 0; int i = 0; int cnt = 0; double v = 0; int baseidx = 0; int outidx = 0; int batchsize = 0; int kx = 0; int k0 = 0; int k1 = 0; int nzwidth = 0; nzwidth = 4; alglib.ap.assert(a.blockwidth==nzwidth, "Spline2DFit: integrity check failed"); alglib.ap.assert(alglib.ap.len(x)>=a.kx*a.ky, "Spline2DFit: integrity check failed"); // // Prepare // apserv.rvectorsetlengthatleast(ref y, a.nrows, _params); apserv.rvectorsetlengthatleast(ref a.tmp0, nzwidth*nzwidth, _params); apserv.rvectorsetlengthatleast(ref a.tmp1, a.maxbatch, _params); kx = a.kx; outidx = 0; // // Process dense part // for(bidx=0; bidx<=a.ndensebatches-1; bidx++) { if( a.batches[bidx+1]-a.batches[bidx]>0 ) { batchsize = a.batches[bidx+1]-a.batches[bidx]; baseidx = a.batchbases[bidx]; for(k1=0; k1<=nzwidth-1; k1++) { for(k0=0; k0<=nzwidth-1; k0++) { a.tmp0[k1*nzwidth+k0] = x[baseidx+k1*kx+k0]; } } ablas.rmatrixgemv(batchsize, nzwidth*nzwidth, 1.0, a.vals, a.batches[bidx], 0, 0, a.tmp0, 0, 0.0, a.tmp1, 0, _params); for(i=0; i<=batchsize-1; i++) { y[outidx+i] = a.tmp1[i]; } outidx = outidx+batchsize; } } alglib.ap.assert(outidx==a.ndenserows, "Spline2DFit: integrity check failed"); // // Process regularizer // v = a.lambdareg; cnt = a.kx*a.ky; for(i=0; i<=cnt-1; i++) { y[outidx+i] = v*x[i]; } outidx = outidx+cnt; // // Post-check // alglib.ap.assert(outidx==a.nrows, "Spline2DFit: integrity check failed"); } /************************************************************************* This function performs matrix-vector product of transposed design matrix and dense vector. INPUT PARAMETERS: A - design matrix, (a.nrows) X (a.kx*a.ky); some fields of A are used for temporaries, so it is non-constant. 
X - array[A.NRows] OUTPUT PARAMETERS: Y - product, array[A.KX*A.KY], automatically allocated -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ private static void xdesignmtv(spline2dxdesignmatrix a, double[] x, ref double[] y, alglib.xparams _params) { int bidx = 0; int i = 0; int cnt = 0; double v = 0; int baseidx = 0; int inidx = 0; int batchsize = 0; int kx = 0; int k0 = 0; int k1 = 0; int nzwidth = 0; nzwidth = 4; alglib.ap.assert(a.blockwidth==nzwidth, "Spline2DFit: integrity check failed"); alglib.ap.assert(alglib.ap.len(x)>=a.nrows, "Spline2DFit: integrity check failed"); // // Prepare // apserv.rvectorsetlengthatleast(ref y, a.kx*a.ky, _params); apserv.rvectorsetlengthatleast(ref a.tmp0, nzwidth*nzwidth, _params); apserv.rvectorsetlengthatleast(ref a.tmp1, a.maxbatch, _params); kx = a.kx; inidx = 0; cnt = a.kx*a.ky; for(i=0; i<=cnt-1; i++) { y[i] = 0; } // // Process dense part // for(bidx=0; bidx<=a.ndensebatches-1; bidx++) { if( a.batches[bidx+1]-a.batches[bidx]>0 ) { batchsize = a.batches[bidx+1]-a.batches[bidx]; baseidx = a.batchbases[bidx]; for(i=0; i<=batchsize-1; i++) { a.tmp1[i] = x[inidx+i]; } ablas.rmatrixgemv(nzwidth*nzwidth, batchsize, 1.0, a.vals, a.batches[bidx], 0, 1, a.tmp1, 0, 0.0, a.tmp0, 0, _params); for(k1=0; k1<=nzwidth-1; k1++) { for(k0=0; k0<=nzwidth-1; k0++) { y[baseidx+k1*kx+k0] = y[baseidx+k1*kx+k0]+a.tmp0[k1*nzwidth+k0]; } } inidx = inidx+batchsize; } } alglib.ap.assert(inidx==a.ndenserows, "Spline2DFit: integrity check failed"); // // Process regularizer // v = a.lambdareg; cnt = a.kx*a.ky; for(i=0; i<=cnt-1; i++) { y[i] = y[i]+v*x[inidx+i]; } inidx = inidx+cnt; // // Post-check // alglib.ap.assert(inidx==a.nrows, "Spline2DFit: integrity check failed"); } /************************************************************************* This function generates squared design matrix stored in block band format. We use an adaptation of block skyline storage format, with TOWERSIZE*KX skyline bands (towers) stored sequentially; here TOWERSIZE=(BlockBandwidth+1)*KX. So, we have KY "towers", stored one below other, in BlockATA matrix. Every "tower" is a sequence of BlockBandwidth+1 cells, each of them being KX*KX in size. INPUT PARAMETERS: A - design matrix; some of its fields are used for temporaries BlockATA- array[KY*(BlockBandwidth+1)*KX,KX], preallocated storage for output matrix in compressed block band format OUTPUT PARAMETERS: BlockATA- AH*AH', stored in compressed block band format MXATA - max(|AH*AH'|), elementwise -- ALGLIB -- Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ private static void xdesignblockata(spline2dxdesignmatrix a, double[,] blockata, ref double mxata, alglib.xparams _params) { int blockbandwidth = 0; int nzwidth = 0; int kx = 0; int ky = 0; int i0 = 0; int i1 = 0; int j0 = 0; int j1 = 0; int celloffset = 0; int bidx = 0; int baseidx = 0; int batchsize = 0; int offs0 = 0; int offs1 = 0; double v = 0; blockbandwidth = 3; nzwidth = 4; kx = a.kx; ky = a.ky; alglib.ap.assert(a.blockwidth==nzwidth, "Spline2DFit: integrity check failed"); apserv.rmatrixsetlengthatleast(ref a.tmp2, nzwidth*nzwidth, nzwidth*nzwidth, _params); // // Initial zero-fill: // * zero-fill ALL elements of BlockATA // * zero-fill ALL elements of Tmp2 // // Filling ALL elements, including unused ones, is essential for the // purposes of calculating max(BlockATA). 
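// Storage layout reminder before the fill loops below (see GetCellOffset above):
// cell (I,J), with J in [I,I+BlockBandwidth], starts at row offset
//     J*(BlockBandwidth+1)*KX + (BlockBandwidth-(J-I))*KX
// of BlockATA and occupies KX rows by KX columns. For example, with KX=5 and
// BlockBandwidth=3, cell (2,3) starts at row 3*4*5 + (3-1)*5 = 70.
//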
// for(i1=0; i1<=ky-1; i1++) { for(i0=i1; i0<=Math.Min(ky-1, i1+blockbandwidth); i0++) { celloffset = getcelloffset(kx, ky, blockbandwidth, i1, i0, _params); for(j1=0; j1<=kx-1; j1++) { for(j0=0; j0<=kx-1; j0++) { blockata[celloffset+j1,j0] = 0.0; } } } } for(j1=0; j1<=nzwidth*nzwidth-1; j1++) { for(j0=0; j0<=nzwidth*nzwidth-1; j0++) { a.tmp2[j1,j0] = 0.0; } } // // Process dense part of A // for(bidx=0; bidx<=a.ndensebatches-1; bidx++) { if( a.batches[bidx+1]-a.batches[bidx]>0 ) { // // Generate 16x16 U = BATCH'*BATCH and add it to ATA. // // NOTE: it is essential that lower triangle of Tmp2 is // filled by zeros. // batchsize = a.batches[bidx+1]-a.batches[bidx]; ablas.rmatrixsyrk(nzwidth*nzwidth, batchsize, 1.0, a.vals, a.batches[bidx], 0, 2, 0.0, a.tmp2, 0, 0, true, _params); baseidx = a.batchbases[bidx]; for(i1=0; i1<=nzwidth-1; i1++) { for(j1=i1; j1<=nzwidth-1; j1++) { celloffset = getcelloffset(kx, ky, blockbandwidth, baseidx/kx+i1, baseidx/kx+j1, _params); offs0 = baseidx%kx; offs1 = baseidx%kx; for(i0=0; i0<=nzwidth-1; i0++) { for(j0=0; j0<=nzwidth-1; j0++) { v = a.tmp2[i1*nzwidth+i0,j1*nzwidth+j0]; blockata[celloffset+offs1+i0,offs0+j0] = blockata[celloffset+offs1+i0,offs0+j0]+v; } } } } } } // // Process regularizer term // for(i1=0; i1<=ky-1; i1++) { celloffset = getcelloffset(kx, ky, blockbandwidth, i1, i1, _params); for(j1=0; j1<=kx-1; j1++) { blockata[celloffset+j1,j1] = blockata[celloffset+j1,j1]+math.sqr(a.lambdareg); } } // // Calculate max(ATA) // // NOTE: here we rely on zero initialization of unused parts of // BlockATA and Tmp2. // mxata = 0.0; for(i1=0; i1<=ky-1; i1++) { for(i0=i1; i0<=Math.Min(ky-1, i1+blockbandwidth); i0++) { celloffset = getcelloffset(kx, ky, blockbandwidth, i1, i0, _params); for(j1=0; j1<=kx-1; j1++) { for(j0=0; j0<=kx-1; j0++) { mxata = Math.Max(mxata, Math.Abs(blockata[celloffset+j1,j0])); } } } } } } public class rbfv1 { /************************************************************************* Buffer object which is used to perform nearest neighbor requests in the multithreaded mode (multiple threads working with same KD-tree object). This object should be created with KDTreeCreateBuffer(). *************************************************************************/ public class rbfv1calcbuffer : apobject { public double[] calcbufxcx; public double[,] calcbufx; public int[] calcbuftags; public nearestneighbor.kdtreerequestbuffer requestbuffer; public rbfv1calcbuffer() { init(); } public override void init() { calcbufxcx = new double[0]; calcbufx = new double[0,0]; calcbuftags = new int[0]; requestbuffer = new nearestneighbor.kdtreerequestbuffer(); } public override alglib.apobject make_copy() { rbfv1calcbuffer _result = new rbfv1calcbuffer(); _result.calcbufxcx = (double[])calcbufxcx.Clone(); _result.calcbufx = (double[,])calcbufx.Clone(); _result.calcbuftags = (int[])calcbuftags.Clone(); _result.requestbuffer = (nearestneighbor.kdtreerequestbuffer)requestbuffer.make_copy(); return _result; } }; /************************************************************************* RBF model. Never try to directly work with fields of this object - always use ALGLIB functions to use this object. 
*************************************************************************/ public class rbfv1model : apobject { public int ny; public int nx; public int nc; public int nl; public nearestneighbor.kdtree tree; public double[,] xc; public double[,] wr; public double rmax; public double[,] v; public double[] calcbufxcx; public double[,] calcbufx; public int[] calcbuftags; public rbfv1model() { init(); } public override void init() { tree = new nearestneighbor.kdtree(); xc = new double[0,0]; wr = new double[0,0]; v = new double[0,0]; calcbufxcx = new double[0]; calcbufx = new double[0,0]; calcbuftags = new int[0]; } public override alglib.apobject make_copy() { rbfv1model _result = new rbfv1model(); _result.ny = ny; _result.nx = nx; _result.nc = nc; _result.nl = nl; _result.tree = (nearestneighbor.kdtree)tree.make_copy(); _result.xc = (double[,])xc.Clone(); _result.wr = (double[,])wr.Clone(); _result.rmax = rmax; _result.v = (double[,])v.Clone(); _result.calcbufxcx = (double[])calcbufxcx.Clone(); _result.calcbufx = (double[,])calcbufx.Clone(); _result.calcbuftags = (int[])calcbuftags.Clone(); return _result; } }; /************************************************************************* Internal buffer for GridCalc3 *************************************************************************/ public class gridcalc3v1buf : apobject { public double[] tx; public double[] cx; public double[] ty; public bool[] flag0; public bool[] flag1; public bool[] flag2; public bool[] flag12; public double[] expbuf0; public double[] expbuf1; public double[] expbuf2; public nearestneighbor.kdtreerequestbuffer requestbuf; public double[,] calcbufx; public int[] calcbuftags; public gridcalc3v1buf() { init(); } public override void init() { tx = new double[0]; cx = new double[0]; ty = new double[0]; flag0 = new bool[0]; flag1 = new bool[0]; flag2 = new bool[0]; flag12 = new bool[0]; expbuf0 = new double[0]; expbuf1 = new double[0]; expbuf2 = new double[0]; requestbuf = new nearestneighbor.kdtreerequestbuffer(); calcbufx = new double[0,0]; calcbuftags = new int[0]; } public override alglib.apobject make_copy() { gridcalc3v1buf _result = new gridcalc3v1buf(); _result.tx = (double[])tx.Clone(); _result.cx = (double[])cx.Clone(); _result.ty = (double[])ty.Clone(); _result.flag0 = (bool[])flag0.Clone(); _result.flag1 = (bool[])flag1.Clone(); _result.flag2 = (bool[])flag2.Clone(); _result.flag12 = (bool[])flag12.Clone(); _result.expbuf0 = (double[])expbuf0.Clone(); _result.expbuf1 = (double[])expbuf1.Clone(); _result.expbuf2 = (double[])expbuf2.Clone(); _result.requestbuf = (nearestneighbor.kdtreerequestbuffer)requestbuf.make_copy(); _result.calcbufx = (double[,])calcbufx.Clone(); _result.calcbuftags = (int[])calcbuftags.Clone(); return _result; } }; /************************************************************************* RBF solution report: * TerminationType - termination type, positive values - success, non-positive - failure. 
*************************************************************************/ public class rbfv1report : apobject { public int arows; public int acols; public int annz; public int iterationscount; public int nmv; public int terminationtype; public rbfv1report() { init(); } public override void init() { } public override alglib.apobject make_copy() { rbfv1report _result = new rbfv1report(); _result.arows = arows; _result.acols = acols; _result.annz = annz; _result.iterationscount = iterationscount; _result.nmv = nmv; _result.terminationtype = terminationtype; return _result; } }; public const int mxnx = 3; public const double rbffarradius = 6; public const double rbfnearradius = 2.1; public const double rbfmlradius = 3; public const double minbasecasecost = 100000; /************************************************************************* This function creates RBF model for a scalar (NY=1) or vector (NY>1) function in a NX-dimensional space (NX=2 or NX=3). INPUT PARAMETERS: NX - dimension of the space, NX=2 or NX=3 NY - function dimension, NY>=1 OUTPUT PARAMETERS: S - RBF model (initially equals to zero) -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfv1create(int nx, int ny, rbfv1model s, alglib.xparams _params) { int i = 0; int j = 0; alglib.ap.assert(nx==2 || nx==3, "RBFCreate: NX<>2 and NX<>3"); alglib.ap.assert(ny>=1, "RBFCreate: NY<1"); s.nx = nx; s.ny = ny; s.nl = 0; s.nc = 0; s.v = new double[ny, mxnx+1]; for(i=0; i<=ny-1; i++) { for(j=0; j<=mxnx; j++) { s.v[i,j] = 0; } } s.rmax = 0; } /************************************************************************* This function creates buffer structure which can be used to perform parallel RBF model evaluations (with one RBF model instance being used from multiple threads, as long as different threads use different instances of buffer). This buffer object can be used with rbftscalcbuf() function (here "ts" stands for "thread-safe", "buf" is a suffix which denotes function which reuses previously allocated output space). How to use it: * create RBF model structure with rbfcreate() * load data, tune parameters * call rbfbuildmodel() * call rbfcreatecalcbuffer(), once per thread working with RBF model (you should call this function only AFTER call to rbfbuildmodel(), see below for more information) * call rbftscalcbuf() from different threads, with each thread working with its own copy of buffer object. INPUT PARAMETERS S - RBF model OUTPUT PARAMETERS Buf - external buffer. IMPORTANT: buffer object should be used only with RBF model object which was used to initialize buffer. Any attempt to use buffer with different object is dangerous - you may get memory violation error because sizes of internal arrays do not fit to dimensions of RBF structure. IMPORTANT: you should call this function only for model which was built with rbfbuildmodel() function, after successful invocation of rbfbuildmodel(). Sizes of some internal structures are determined only after model is built, so buffer object created before model construction stage will be useless (and any attempt to use it will result in exception). 
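
A rough usage sketch of the workflow listed above (public wrapper names as
referenced in this comment; this is an illustration, not normative code, and
exact wrapper signatures should be checked against the public interface):

    alglib.rbfmodel model;
    alglib.rbfreport rep;
    alglib.rbfcalcbuffer buf;
    alglib.rbfcreate(2, 1, out model);          // 2D space, scalar function
    // ... load dataset and tune parameters here ...
    alglib.rbfbuildmodel(model, out rep);       // build BEFORE creating buffers
    alglib.rbfcreatecalcbuffer(model, out buf); // one buffer per worker thread
    double[] x = new double[]{0.5, 0.5};
    double[] y = new double[0];
    alglib.rbftscalcbuf(model, buf, x, ref y);  // thread-safe evaluation
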
-- ALGLIB -- Copyright 02.04.2016 by Sergey Bochkanov *************************************************************************/ public static void rbfv1createcalcbuffer(rbfv1model s, rbfv1calcbuffer buf, alglib.xparams _params) { nearestneighbor.kdtreecreaterequestbuffer(s.tree, buf.requestbuffer, _params); } /************************************************************************* This function builds RBF model and returns report (contains some information which can be used for evaluation of the algorithm properties). Call to this function modifies RBF model by calculating its centers/radii/ weights and saving them into RBFModel structure. Initially RBFModel contain zero coefficients, but after call to this function we will have coefficients which were calculated in order to fit our dataset. After you called this function you can call RBFCalc(), RBFGridCalc() and other model calculation functions. INPUT PARAMETERS: S - RBF model, initialized by RBFCreate() call Rep - report: * Rep.TerminationType: * -5 - non-distinct basis function centers were detected, interpolation aborted * -4 - nonconvergence of the internal SVD solver * 1 - successful termination Fields are used for debugging purposes: * Rep.IterationsCount - iterations count of the LSQR solver * Rep.NMV - number of matrix-vector products * Rep.ARows - rows count for the system matrix * Rep.ACols - columns count for the system matrix * Rep.ANNZ - number of significantly non-zero elements (elements above some algorithm-determined threshold) NOTE: failure to build model will leave current state of the structure unchanged. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfv1buildmodel(double[,] x, double[,] y, int n, int aterm, int algorithmtype, int nlayers, double radvalue, double radzvalue, double lambdav, double epsort, double epserr, int maxits, rbfv1model s, rbfv1report rep, alglib.xparams _params) { nearestneighbor.kdtree tree = new nearestneighbor.kdtree(); nearestneighbor.kdtree ctree = new nearestneighbor.kdtree(); double[] dist = new double[0]; double[] xcx = new double[0]; double[,] a = new double[0,0]; double[,] v = new double[0,0]; double[,] omega = new double[0,0]; double[,] residualy = new double[0,0]; double[] radius = new double[0]; double[,] xc = new double[0,0]; int nc = 0; double rmax = 0; int[] tags = new int[0]; int[] ctags = new int[0]; int i = 0; int j = 0; int k = 0; int snnz = 0; double[] tmp0 = new double[0]; double[] tmp1 = new double[0]; int layerscnt = 0; bool modelstatus = new bool(); alglib.ap.assert(s.nx==2 || s.nx==3, "RBFBuildModel: S.NX<>2 or S.NX<>3!"); // // Quick exit when we have no points // if( n==0 ) { rep.terminationtype = 1; rep.iterationscount = 0; rep.nmv = 0; rep.arows = 0; rep.acols = 0; nearestneighbor.kdtreebuildtagged(s.xc, tags, 0, mxnx, 0, 2, s.tree, _params); s.xc = new double[0, 0]; s.wr = new double[0, 0]; s.nc = 0; s.rmax = 0; s.v = new double[s.ny, mxnx+1]; for(i=0; i<=s.ny-1; i++) { for(j=0; j<=mxnx; j++) { s.v[i,j] = 0; } } return; } // // General case, N>0 // rep.annz = 0; rep.iterationscount = 0; rep.nmv = 0; xcx = new double[mxnx]; // // First model in a sequence - linear model. // Residuals from linear regression are stored in the ResidualY variable // (used later to build RBF models). 
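// (The final model evaluated by RBFV1Calc2/RBFV1Calc3 is the sum of this linear
//  term, V[j,0..NX-1]*X + V[j,MXNX], and the RBF correction which is fitted
//  below to the residuals stored in ResidualY.)
//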
// residualy = new double[n, s.ny]; for(i=0; i<=n-1; i++) { for(j=0; j<=s.ny-1; j++) { residualy[i,j] = y[i,j]; } } if( !rbfv1buildlinearmodel(x, ref residualy, n, s.ny, aterm, ref v, _params) ) { rep.terminationtype = -5; return; } // // Handle special case: multilayer model with NLayers=0. // Quick exit. // if( algorithmtype==2 && nlayers==0 ) { rep.terminationtype = 1; rep.iterationscount = 0; rep.nmv = 0; rep.arows = 0; rep.acols = 0; nearestneighbor.kdtreebuildtagged(s.xc, tags, 0, mxnx, 0, 2, s.tree, _params); s.xc = new double[0, 0]; s.wr = new double[0, 0]; s.nc = 0; s.rmax = 0; s.v = new double[s.ny, mxnx+1]; for(i=0; i<=s.ny-1; i++) { for(j=0; j<=mxnx; j++) { s.v[i,j] = v[i,j]; } } return; } // // Second model in a sequence - RBF term. // // NOTE: assignments below are not necessary, but without them // MSVC complains about unitialized variables. // nc = 0; rmax = 0; layerscnt = 0; modelstatus = false; if( algorithmtype==1 ) { // // Add RBF model. // This model uses local KD-trees to speed-up nearest neighbor searches. // nc = n; xc = new double[nc, mxnx]; for(i=0; i<=nc-1; i++) { for(j=0; j<=mxnx-1; j++) { xc[i,j] = x[i,j]; } } rmax = 0; radius = new double[nc]; ctags = new int[nc]; for(i=0; i<=nc-1; i++) { ctags[i] = i; } nearestneighbor.kdtreebuildtagged(xc, ctags, nc, mxnx, 0, 2, ctree, _params); if( nc==0 ) { rmax = 1; } else { if( nc==1 ) { radius[0] = radvalue; rmax = radius[0]; } else { // // NC>1, calculate radii using distances to nearest neigbors // for(i=0; i<=nc-1; i++) { for(j=0; j<=mxnx-1; j++) { xcx[j] = xc[i,j]; } if( nearestneighbor.kdtreequeryknn(ctree, xcx, 1, false, _params)>0 ) { nearestneighbor.kdtreequeryresultsdistances(ctree, ref dist, _params); radius[i] = radvalue*dist[0]; } else { // // No neighbors found (it will happen when we have only one center). // Initialize radius with default value. 
// radius[i] = 1.0; } } // // Apply filtering // apserv.rvectorsetlengthatleast(ref tmp0, nc, _params); for(i=0; i<=nc-1; i++) { tmp0[i] = radius[i]; } tsort.tagsortfast(ref tmp0, ref tmp1, nc, _params); for(i=0; i<=nc-1; i++) { radius[i] = Math.Min(radius[i], radzvalue*tmp0[nc/2]); } // // Calculate RMax, check that all radii are non-zero // for(i=0; i<=nc-1; i++) { rmax = Math.Max(rmax, radius[i]); } for(i=0; i<=nc-1; i++) { if( (double)(radius[i])==(double)(0) ) { rep.terminationtype = -5; return; } } } } apserv.ivectorsetlengthatleast(ref tags, n, _params); for(i=0; i<=n-1; i++) { tags[i] = i; } nearestneighbor.kdtreebuildtagged(x, tags, n, mxnx, 0, 2, tree, _params); buildrbfmodellsqr(x, ref residualy, xc, radius, n, nc, s.ny, tree, ctree, epsort, epserr, maxits, ref rep.annz, ref snnz, ref omega, ref rep.terminationtype, ref rep.iterationscount, ref rep.nmv, _params); layerscnt = 1; modelstatus = true; } if( algorithmtype==2 ) { rmax = radvalue; buildrbfmlayersmodellsqr(x, ref residualy, ref xc, radvalue, ref radius, n, ref nc, s.ny, nlayers, ctree, 1.0E-6, 1.0E-6, 50, lambdav, ref rep.annz, ref omega, ref rep.terminationtype, ref rep.iterationscount, ref rep.nmv, _params); layerscnt = nlayers; modelstatus = true; } alglib.ap.assert(modelstatus, "RBFBuildModel: integrity error"); if( rep.terminationtype<=0 ) { return; } // // Model is built // s.nc = nc/layerscnt; s.rmax = rmax; s.nl = layerscnt; s.xc = new double[s.nc, mxnx]; s.wr = new double[s.nc, 1+s.nl*s.ny]; s.v = new double[s.ny, mxnx+1]; for(i=0; i<=s.nc-1; i++) { for(j=0; j<=mxnx-1; j++) { s.xc[i,j] = xc[i,j]; } } apserv.ivectorsetlengthatleast(ref tags, s.nc, _params); for(i=0; i<=s.nc-1; i++) { tags[i] = i; } nearestneighbor.kdtreebuildtagged(s.xc, tags, s.nc, mxnx, 0, 2, s.tree, _params); for(i=0; i<=s.nc-1; i++) { s.wr[i,0] = radius[i]; for(k=0; k<=layerscnt-1; k++) { for(j=0; j<=s.ny-1; j++) { s.wr[i,1+k*s.ny+j] = omega[k*s.nc+i,j]; } } } for(i=0; i<=s.ny-1; i++) { for(j=0; j<=mxnx; j++) { s.v[i,j] = v[i,j]; } } rep.terminationtype = 1; rep.arows = n; rep.acols = s.nc; } /************************************************************************* Serializer: allocation -- ALGLIB -- Copyright 02.02.2012 by Bochkanov Sergey *************************************************************************/ public static void rbfv1alloc(alglib.serializer s, rbfv1model model, alglib.xparams _params) { // // Data // s.alloc_entry(); s.alloc_entry(); s.alloc_entry(); s.alloc_entry(); nearestneighbor.kdtreealloc(s, model.tree, _params); apserv.allocrealmatrix(s, model.xc, -1, -1, _params); apserv.allocrealmatrix(s, model.wr, -1, -1, _params); s.alloc_entry(); apserv.allocrealmatrix(s, model.v, -1, -1, _params); } /************************************************************************* Serializer: serialization -- ALGLIB -- Copyright 02.02.2012 by Bochkanov Sergey *************************************************************************/ public static void rbfv1serialize(alglib.serializer s, rbfv1model model, alglib.xparams _params) { // // Data // s.serialize_int(model.nx); s.serialize_int(model.ny); s.serialize_int(model.nc); s.serialize_int(model.nl); nearestneighbor.kdtreeserialize(s, model.tree, _params); apserv.serializerealmatrix(s, model.xc, -1, -1, _params); apserv.serializerealmatrix(s, model.wr, -1, -1, _params); s.serialize_double(model.rmax); apserv.serializerealmatrix(s, model.v, -1, -1, _params); } /************************************************************************* Serializer: unserialization -- ALGLIB -- Copyright 
02.02.2012 by Bochkanov Sergey *************************************************************************/ public static void rbfv1unserialize(alglib.serializer s, rbfv1model model, alglib.xparams _params) { int nx = 0; int ny = 0; // // Unserialize primary model parameters, initialize model. // // It is necessary to call RBFCreate() because some internal fields // which are NOT unserialized will need initialization. // nx = s.unserialize_int(); ny = s.unserialize_int(); rbfv1create(nx, ny, model, _params); model.nc = s.unserialize_int(); model.nl = s.unserialize_int(); nearestneighbor.kdtreeunserialize(s, model.tree, _params); apserv.unserializerealmatrix(s, ref model.xc, _params); apserv.unserializerealmatrix(s, ref model.wr, _params); model.rmax = s.unserialize_double(); apserv.unserializerealmatrix(s, ref model.v, _params); } /************************************************************************* This function calculates values of the RBF model in the given point. This function should be used when we have NY=1 (scalar function) and NX=2 (2-dimensional space). If you have 3-dimensional space, use RBFCalc3(). If you have general situation (NX-dimensional space, NY-dimensional function) you should use general, less efficient implementation RBFCalc(). If you want to calculate function values many times, consider using RBFGridCalc2(), which is far more efficient than many subsequent calls to RBFCalc2(). This function returns 0.0 when: * model is not initialized * NX<>2 *NY<>1 INPUT PARAMETERS: S - RBF model X0 - first coordinate, finite number X1 - second coordinate, finite number RESULT: value of the model or 0.0 (as defined above) -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static double rbfv1calc2(rbfv1model s, double x0, double x1, alglib.xparams _params) { double result = 0; int i = 0; int j = 0; int lx = 0; int tg = 0; double d2 = 0; double t = 0; double bfcur = 0; double rcur = 0; alglib.ap.assert(math.isfinite(x0), "RBFCalc2: invalid value for X0 (X0 is Inf)!"); alglib.ap.assert(math.isfinite(x1), "RBFCalc2: invalid value for X1 (X1 is Inf)!"); if( s.ny!=1 || s.nx!=2 ) { result = 0; return result; } result = s.v[0,0]*x0+s.v[0,1]*x1+s.v[0,mxnx]; if( s.nc==0 ) { return result; } apserv.rvectorsetlengthatleast(ref s.calcbufxcx, mxnx, _params); for(i=0; i<=mxnx-1; i++) { s.calcbufxcx[i] = 0.0; } s.calcbufxcx[0] = x0; s.calcbufxcx[1] = x1; lx = nearestneighbor.kdtreequeryrnn(s.tree, s.calcbufxcx, s.rmax*rbffarradius, true, _params); nearestneighbor.kdtreequeryresultsx(s.tree, ref s.calcbufx, _params); nearestneighbor.kdtreequeryresultstags(s.tree, ref s.calcbuftags, _params); for(i=0; i<=lx-1; i++) { tg = s.calcbuftags[i]; d2 = math.sqr(x0-s.calcbufx[i,0])+math.sqr(x1-s.calcbufx[i,1]); rcur = s.wr[tg,0]; bfcur = Math.Exp(-(d2/(rcur*rcur))); for(j=0; j<=s.nl-1; j++) { result = result+bfcur*s.wr[tg,1+j]; rcur = 0.5*rcur; t = bfcur*bfcur; bfcur = t*t; } } return result; } /************************************************************************* This function calculates values of the RBF model in the given point. This function should be used when we have NY=1 (scalar function) and NX=3 (3-dimensional space). If you have 2-dimensional space, use RBFCalc2(). If you have general situation (NX-dimensional space, NY-dimensional function) you should use general, less efficient implementation RBFCalc(). 
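NOTE (editor's illustration, not part of the original comment): RBFCalc2() and RBFCalc3() evaluate the same layered Gaussian sum; the plain-array sketch below may make the scheme easier to follow. The helper name and its parameters are hypothetical; only the formula (linear term plus per-layer Gaussian terms whose radii are halved on every layer) is taken from the code of these functions. The library additionally restricts the sum to centers within RMax*RBFFarRadius of the query point; the sketch sums over all centers for clarity. Note the trick used both here and in the library code: since exp(-d^2/(r/2)^2) = (exp(-d^2/r^2))^4, the next layer's basis value is obtained by raising the current one to the fourth power instead of calling Exp() again.

    // C# sketch: evaluate an NX-dimensional, scalar (NY=1) multilayer Gaussian RBF model.
    //   centers : [nc, nx]      basis centers
    //   radii   : [nc]          per-center radius of layer 0 (halved on each following layer)
    //   weights : [nc, nlayers] per-center, per-layer weights
    //   linear  : [nx+1]        linear term, last element is the constant part
    static double EvalLayeredGaussian(double[] x, double[,] centers, double[] radii,
                                      double[,] weights, double[] linear)
    {
        int nx = x.Length, nc = radii.Length, nlayers = weights.GetLength(1);
        double result = linear[nx];
        for(int d=0; d<nx; d++)
            result += linear[d]*x[d];
        for(int i=0; i<nc; i++)
        {
            double d2 = 0;
            for(int d=0; d<nx; d++)
                d2 += (x[d]-centers[i,d])*(x[d]-centers[i,d]);
            double bf = Math.Exp(-d2/(radii[i]*radii[i]));  // layer-0 basis value
            for(int layer=0; layer<nlayers; layer++)
            {
                result += weights[i,layer]*bf;
                double t = bf*bf;
                bf = t*t;  // same as halving the radius and re-evaluating Exp()
            }
        }
        return result;
    }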
This function returns 0.0 when: * model is not initialized * NX<>3 *NY<>1 INPUT PARAMETERS: S - RBF model X0 - first coordinate, finite number X1 - second coordinate, finite number X2 - third coordinate, finite number RESULT: value of the model or 0.0 (as defined above) -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static double rbfv1calc3(rbfv1model s, double x0, double x1, double x2, alglib.xparams _params) { double result = 0; int i = 0; int j = 0; int lx = 0; int tg = 0; double t = 0; double rcur = 0; double bf = 0; alglib.ap.assert(math.isfinite(x0), "RBFCalc3: invalid value for X0 (X0 is Inf or NaN)!"); alglib.ap.assert(math.isfinite(x1), "RBFCalc3: invalid value for X1 (X1 is Inf or NaN)!"); alglib.ap.assert(math.isfinite(x2), "RBFCalc3: invalid value for X2 (X2 is Inf or NaN)!"); if( s.ny!=1 || s.nx!=3 ) { result = 0; return result; } result = s.v[0,0]*x0+s.v[0,1]*x1+s.v[0,2]*x2+s.v[0,mxnx]; if( s.nc==0 ) { return result; } // // calculating value for F(X) // apserv.rvectorsetlengthatleast(ref s.calcbufxcx, mxnx, _params); for(i=0; i<=mxnx-1; i++) { s.calcbufxcx[i] = 0.0; } s.calcbufxcx[0] = x0; s.calcbufxcx[1] = x1; s.calcbufxcx[2] = x2; lx = nearestneighbor.kdtreequeryrnn(s.tree, s.calcbufxcx, s.rmax*rbffarradius, true, _params); nearestneighbor.kdtreequeryresultsx(s.tree, ref s.calcbufx, _params); nearestneighbor.kdtreequeryresultstags(s.tree, ref s.calcbuftags, _params); for(i=0; i<=lx-1; i++) { tg = s.calcbuftags[i]; rcur = s.wr[tg,0]; bf = Math.Exp(-((math.sqr(x0-s.calcbufx[i,0])+math.sqr(x1-s.calcbufx[i,1])+math.sqr(x2-s.calcbufx[i,2]))/math.sqr(rcur))); for(j=0; j<=s.nl-1; j++) { result = result+bf*s.wr[tg,1+j]; t = bf*bf; bf = t*t; } } return result; } /************************************************************************* This function calculates values of the RBF model at the given point. Same as RBFCalc(), but does not reallocate Y when in is large enough to store function values. INPUT PARAMETERS: S - RBF model X - coordinates, array[NX]. X may have more than NX elements, in this case only leading NX will be used. Y - possibly preallocated array OUTPUT PARAMETERS: Y - function value, array[NY]. Y is not reallocated when it is larger than NY. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfv1calcbuf(rbfv1model s, double[] x, ref double[] y, alglib.xparams _params) { int i = 0; int j = 0; int k = 0; int lx = 0; int tg = 0; double t = 0; double rcur = 0; double bf = 0; alglib.ap.assert(alglib.ap.len(x)>=s.nx, "RBFCalcBuf: Length(X)=s.nx, "RBFCalcBuf: Length(X)2 *NY<>1 INPUT PARAMETERS: S - RBF model X0 - array of grid nodes, first coordinates, array[N0] N0 - grid size (number of nodes) in the first dimension X1 - array of grid nodes, second coordinates, array[N1] N1 - grid size (number of nodes) in the second dimension OUTPUT PARAMETERS: Y - function values, array[N0,N1]. Y is out-variable and is reallocated by this function. NOTE: as a special exception, this function supports unordered arrays X0 and X1. However, future versions may be more efficient for X0/X1 ordered by ascending. 
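For illustration (editor's sketch, not part of the original comment), a typical call with uniformly spaced nodes could look as follows; s and _params are assumed to be an already built rbfv1model and an alglib.xparams instance, and in user code such grids are normally produced through the higher-level public wrappers rather than by calling this internal function directly:

    // C# sketch: sample a 2D scalar model on a regular 50x60 grid spanning [-1,1]x[-1,1].
    int n0 = 50, n1 = 60;
    double[] x0 = new double[n0];
    double[] x1 = new double[n1];
    for(int i=0; i<n0; i++)
        x0[i] = -1.0+2.0*i/(n0-1);   // grid nodes along the first coordinate
    for(int j=0; j<n1; j++)
        x1[j] = -1.0+2.0*j/(n1-1);   // grid nodes along the second coordinate
    double[,] y = new double[0,0];
    rbfv1gridcalc2(s, x0, n0, x1, n1, ref y, _params);
    // y has size [n0,n1]; per the array[N0,N1] convention above, y[i,j] is the value
    // of the model at the node (x0[i], x1[j]).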
-- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfv1gridcalc2(rbfv1model s, double[] x0, int n0, double[] x1, int n1, ref double[,] y, alglib.xparams _params) { double[] cpx0 = new double[0]; double[] cpx1 = new double[0]; int[] p01 = new int[0]; int[] p11 = new int[0]; int[] p2 = new int[0]; double rlimit = 0; double xcnorm2 = 0; int hp01 = 0; double hcpx0 = 0; double xc0 = 0; double xc1 = 0; double omega = 0; double radius = 0; int i = 0; int j = 0; int k = 0; int d = 0; int i00 = 0; int i01 = 0; int i10 = 0; int i11 = 0; y = new double[0,0]; alglib.ap.assert(n0>0, "RBFGridCalc2: invalid value for N0 (N0<=0)!"); alglib.ap.assert(n1>0, "RBFGridCalc2: invalid value for N1 (N1<=0)!"); alglib.ap.assert(alglib.ap.len(x0)>=n0, "RBFGridCalc2: Length(X0)=n1, "RBFGridCalc2: Length(X1)=(double)(minbasecasecost) && maxbs>=2 ) { if( block0b-block0a==maxbs ) { rbfv1gridcalc3vrec(s, x0, n0, x1, n1, x2, n2, blocks0, block0a, block0a+maxbs/2, blocks1, block1a, block1b, blocks2, block2a, block2b, flagy, sparsey, searchradius, avgfuncpernode, bufpool, y, _params); rbfv1gridcalc3vrec(s, x0, n0, x1, n1, x2, n2, blocks0, block0a+maxbs/2, block0b, blocks1, block1a, block1b, blocks2, block2a, block2b, flagy, sparsey, searchradius, avgfuncpernode, bufpool, y, _params); return; } if( block1b-block1a==maxbs ) { rbfv1gridcalc3vrec(s, x0, n0, x1, n1, x2, n2, blocks0, block0a, block0b, blocks1, block1a, block1a+maxbs/2, blocks2, block2a, block2b, flagy, sparsey, searchradius, avgfuncpernode, bufpool, y, _params); rbfv1gridcalc3vrec(s, x0, n0, x1, n1, x2, n2, blocks0, block0a, block0b, blocks1, block1a+maxbs/2, block1b, blocks2, block2a, block2b, flagy, sparsey, searchradius, avgfuncpernode, bufpool, y, _params); return; } if( block2b-block2a==maxbs ) { rbfv1gridcalc3vrec(s, x0, n0, x1, n1, x2, n2, blocks0, block0a, block0b, blocks1, block1a, block1b, blocks2, block2a, block2a+maxbs/2, flagy, sparsey, searchradius, avgfuncpernode, bufpool, y, _params); rbfv1gridcalc3vrec(s, x0, n0, x1, n1, x2, n2, blocks0, block0a, block0b, blocks1, block1a, block1b, blocks2, block2a+maxbs/2, block2b, flagy, sparsey, searchradius, avgfuncpernode, bufpool, y, _params); return; } } // // Retrieve buffer object from pool (it will be returned later) // alglib.smp.ae_shared_pool_retrieve(bufpool, ref pbuf); // // Calculate RBF model // for(i2=block2a; i2<=block2b-1; i2++) { for(i1=block1a; i1<=block1b-1; i1++) { for(i0=block0a; i0<=block0b-1; i0++) { // // Analyze block - determine what elements are needed and what are not. // // After this block is done, two flag variables can be used: // * SomeNodes, which is True when there are at least one node which have // to be calculated // * AllNodes, which is True when all nodes are required // somenodes = true; allnodes = true; flag12dim1 = blocks1[i1+1]-blocks1[i1]; flag12dim2 = blocks2[i2+1]-blocks2[i2]; if( sparsey ) { // // Use FlagY to determine what is required. 
// apserv.bvectorsetlengthatleast(ref pbuf.flag0, n0, _params); apserv.bvectorsetlengthatleast(ref pbuf.flag1, n1, _params); apserv.bvectorsetlengthatleast(ref pbuf.flag2, n2, _params); apserv.bvectorsetlengthatleast(ref pbuf.flag12, flag12dim1*flag12dim2, _params); for(i=blocks0[i0]; i<=blocks0[i0+1]-1; i++) { pbuf.flag0[i] = false; } for(j=blocks1[i1]; j<=blocks1[i1+1]-1; j++) { pbuf.flag1[j] = false; } for(k=blocks2[i2]; k<=blocks2[i2+1]-1; k++) { pbuf.flag2[k] = false; } for(i=0; i<=flag12dim1*flag12dim2-1; i++) { pbuf.flag12[i] = false; } somenodes = false; allnodes = true; for(k=blocks2[i2]; k<=blocks2[i2+1]-1; k++) { for(j=blocks1[i1]; j<=blocks1[i1+1]-1; j++) { dstoffs = j-blocks1[i1]+flag12dim1*(k-blocks2[i2]); srcoffs = j*n0+k*n0*n1; for(i=blocks0[i0]; i<=blocks0[i0+1]-1; i++) { if( flagy[srcoffs+i] ) { pbuf.flag0[i] = true; pbuf.flag1[j] = true; pbuf.flag2[k] = true; pbuf.flag12[dstoffs] = true; somenodes = true; } else { allnodes = false; } } } } } // // Skip block if it is completely empty. // if( !somenodes ) { continue; } // // compute linear term for block (I0,I1,I2) // for(k=blocks2[i2]; k<=blocks2[i2+1]-1; k++) { for(j=blocks1[i1]; j<=blocks1[i1+1]-1; j++) { // // do we need this micro-row? // if( !allnodes && !pbuf.flag12[j-blocks1[i1]+flag12dim1*(k-blocks2[i2])] ) { continue; } // // Compute linear term // for(i=blocks0[i0]; i<=blocks0[i0+1]-1; i++) { pbuf.tx[0] = x0[i]; pbuf.tx[1] = x1[j]; pbuf.tx[2] = x2[k]; for(l=0; l<=s.ny-1; l++) { v = s.v[l,mxnx]; for(t=0; t<=nx-1; t++) { v = v+s.v[l,t]*pbuf.tx[t]; } y[l+ny*(i+j*n0+k*n0*n1)] = v; } } } } // // compute RBF term for block (I0,I1,I2) // pbuf.tx[0] = 0.5*(x0[blocks0[i0]]+x0[blocks0[i0+1]-1]); pbuf.tx[1] = 0.5*(x1[blocks1[i1]]+x1[blocks1[i1+1]-1]); pbuf.tx[2] = 0.5*(x2[blocks2[i2]]+x2[blocks2[i2+1]-1]); kc = nearestneighbor.kdtreetsqueryrnn(s.tree, pbuf.requestbuf, pbuf.tx, searchradius, true, _params); nearestneighbor.kdtreetsqueryresultsx(s.tree, pbuf.requestbuf, ref pbuf.calcbufx, _params); nearestneighbor.kdtreetsqueryresultstags(s.tree, pbuf.requestbuf, ref pbuf.calcbuftags, _params); for(ic=0; ic<=kc-1; ic++) { pbuf.cx[0] = pbuf.calcbufx[ic,0]; pbuf.cx[1] = pbuf.calcbufx[ic,1]; pbuf.cx[2] = pbuf.calcbufx[ic,2]; tg = pbuf.calcbuftags[ic]; rcur = s.wr[tg,0]; rcur2 = rcur*rcur; for(i=blocks0[i0]; i<=blocks0[i0+1]-1; i++) { if( allnodes || pbuf.flag0[i] ) { pbuf.expbuf0[i] = Math.Exp(-(math.sqr(x0[i]-pbuf.cx[0])/rcur2)); } else { pbuf.expbuf0[i] = 0.0; } } for(j=blocks1[i1]; j<=blocks1[i1+1]-1; j++) { if( allnodes || pbuf.flag1[j] ) { pbuf.expbuf1[j] = Math.Exp(-(math.sqr(x1[j]-pbuf.cx[1])/rcur2)); } else { pbuf.expbuf1[j] = 0.0; } } for(k=blocks2[i2]; k<=blocks2[i2+1]-1; k++) { if( allnodes || pbuf.flag2[k] ) { pbuf.expbuf2[k] = Math.Exp(-(math.sqr(x2[k]-pbuf.cx[2])/rcur2)); } else { pbuf.expbuf2[k] = 0.0; } } for(t=0; t<=s.nl-1; t++) { // // Calculate // for(k=blocks2[i2]; k<=blocks2[i2+1]-1; k++) { for(j=blocks1[i1]; j<=blocks1[i1+1]-1; j++) { // // do we need this micro-row? 
// if( !allnodes && !pbuf.flag12[j-blocks1[i1]+flag12dim1*(k-blocks2[i2])] ) { continue; } // // Prepare local variables // dstoffs = ny*(blocks0[i0]+j*n0+k*n0*n1); v = pbuf.expbuf1[j]*pbuf.expbuf2[k]; // // Optimized for NY=1 // if( s.ny==1 ) { w0 = s.wr[tg,1+t*s.ny+0]; ubnd = blocks0[i0+1]-1; for(i=blocks0[i0]; i<=ubnd; i++) { basisfuncval = pbuf.expbuf0[i]*v; y[dstoffs] = y[dstoffs]+basisfuncval*w0; dstoffs = dstoffs+1; } continue; } // // Optimized for NY=2 // if( s.ny==2 ) { w0 = s.wr[tg,1+t*s.ny+0]; w1 = s.wr[tg,1+t*s.ny+1]; ubnd = blocks0[i0+1]-1; for(i=blocks0[i0]; i<=ubnd; i++) { basisfuncval = pbuf.expbuf0[i]*v; y[dstoffs+0] = y[dstoffs+0]+basisfuncval*w0; y[dstoffs+1] = y[dstoffs+1]+basisfuncval*w1; dstoffs = dstoffs+2; } continue; } // // Optimized for NY=3 // if( s.ny==3 ) { w0 = s.wr[tg,1+t*s.ny+0]; w1 = s.wr[tg,1+t*s.ny+1]; w2 = s.wr[tg,1+t*s.ny+2]; ubnd = blocks0[i0+1]-1; for(i=blocks0[i0]; i<=ubnd; i++) { basisfuncval = pbuf.expbuf0[i]*v; y[dstoffs+0] = y[dstoffs+0]+basisfuncval*w0; y[dstoffs+1] = y[dstoffs+1]+basisfuncval*w1; y[dstoffs+2] = y[dstoffs+2]+basisfuncval*w2; dstoffs = dstoffs+3; } continue; } // // General case // for(i=blocks0[i0]; i<=blocks0[i0+1]-1; i++) { basisfuncval = pbuf.expbuf0[i]*v; for(l=0; l<=s.ny-1; l++) { y[l+dstoffs] = y[l+dstoffs]+basisfuncval*s.wr[tg,1+t*s.ny+l]; } dstoffs = dstoffs+ny; } } } // // Update basis functions // if( t!=s.nl-1 ) { ubnd = blocks0[i0+1]-1; for(i=blocks0[i0]; i<=ubnd; i++) { if( allnodes || pbuf.flag0[i] ) { v = pbuf.expbuf0[i]*pbuf.expbuf0[i]; pbuf.expbuf0[i] = v*v; } } ubnd = blocks1[i1+1]-1; for(j=blocks1[i1]; j<=ubnd; j++) { if( allnodes || pbuf.flag1[j] ) { v = pbuf.expbuf1[j]*pbuf.expbuf1[j]; pbuf.expbuf1[j] = v*v; } } ubnd = blocks2[i2+1]-1; for(k=blocks2[i2]; k<=ubnd; k++) { if( allnodes || pbuf.flag2[k] ) { v = pbuf.expbuf2[k]*pbuf.expbuf2[k]; pbuf.expbuf2[k] = v*v; } } } } } } } } // // Recycle buffer object back to pool // alglib.smp.ae_shared_pool_recycle(bufpool, ref pbuf); } /************************************************************************* Serial stub for GPL edition. *************************************************************************/ public static bool _trypexec_rbfv1gridcalc3vrec(rbfv1model s, double[] x0, int n0, double[] x1, int n1, double[] x2, int n2, int[] blocks0, int block0a, int block0b, int[] blocks1, int block1a, int block1b, int[] blocks2, int block2a, int block2b, bool[] flagy, bool sparsey, double searchradius, double avgfuncpernode, alglib.smp.shared_pool bufpool, double[] y, alglib.xparams _params) { return false; } /************************************************************************* This function "unpacks" RBF model by extracting its coefficients. INPUT PARAMETERS: S - RBF model OUTPUT PARAMETERS: NX - dimensionality of argument NY - dimensionality of the target function XWR - model information, array[NC,NX+NY+1]. One row of the array corresponds to one basis function: * first NX columns - coordinates of the center * next NY columns - weights, one per dimension of the function being modelled * last column - radius, same for all dimensions of the function being modelled NC - number of the centers V - polynomial term , array[NY,NX+1]. One row per one dimension of the function being modelled. First NX elements are linear coefficients, V[NX] is equal to the constant part. 
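For illustration (editor's sketch, hypothetical helper, not part of the original comment), the layout above can be exercised by evaluating the model directly from the unpacked arrays; the Gaussian form of the basis mirrors the evaluation code of this unit:

    // C# sketch: evaluate output component k of an unpacked model at point x (length NX).
    static double EvalUnpacked(double[,] xwr, double[,] v, int nx, int ny, int k, double[] x)
    {
        // polynomial term: first NX columns of V are linear coefficients, V[k,NX] is the constant
        double result = v[k,nx];
        for(int d=0; d<nx; d++)
            result += v[k,d]*x[d];
        // basis functions: columns [0,NX) hold the center, [NX,NX+NY) the weights,
        // column NX+NY the radius (one row per center/layer pair)
        for(int i=0; i<xwr.GetLength(0); i++)
        {
            double d2 = 0;
            for(int d=0; d<nx; d++)
                d2 += (x[d]-xwr[i,d])*(x[d]-xwr[i,d]);
            double r = xwr[i,nx+ny];
            result += xwr[i,nx+k]*Math.Exp(-d2/(r*r));
        }
        return result;
    }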
-- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfv1unpack(rbfv1model s, ref int nx, ref int ny, ref double[,] xwr, ref int nc, ref double[,] v, alglib.xparams _params) { int i = 0; int j = 0; double rcur = 0; int i_ = 0; int i1_ = 0; nx = 0; ny = 0; xwr = new double[0,0]; nc = 0; v = new double[0,0]; nx = s.nx; ny = s.ny; nc = s.nc; // // Fill V // v = new double[s.ny, s.nx+1]; for(i=0; i<=s.ny-1; i++) { for(i_=0; i_<=s.nx-1;i_++) { v[i,i_] = s.v[i,i_]; } v[i,s.nx] = s.v[i,mxnx]; } // // Fill XWR and V // if( nc*s.nl>0 ) { xwr = new double[s.nc*s.nl, s.nx+s.ny+1]; for(i=0; i<=s.nc-1; i++) { rcur = s.wr[i,0]; for(j=0; j<=s.nl-1; j++) { for(i_=0; i_<=s.nx-1;i_++) { xwr[i*s.nl+j,i_] = s.xc[i,i_]; } i1_ = (1+j*s.ny) - (s.nx); for(i_=s.nx; i_<=s.nx+s.ny-1;i_++) { xwr[i*s.nl+j,i_] = s.wr[i,i_+i1_]; } xwr[i*s.nl+j,s.nx+s.ny] = rcur; rcur = 0.5*rcur; } } } } private static bool rbfv1buildlinearmodel(double[,] x, ref double[,] y, int n, int ny, int modeltype, ref double[,] v, alglib.xparams _params) { bool result = new bool(); double[] tmpy = new double[0]; double[,] a = new double[0,0]; double scaling = 0; double[] shifting = new double[0]; double mn = 0; double mx = 0; double[] c = new double[0]; lsfit.lsfitreport rep = new lsfit.lsfitreport(); int i = 0; int j = 0; int k = 0; int info = 0; v = new double[0,0]; alglib.ap.assert(n>=0, "BuildLinearModel: N<0"); alglib.ap.assert(ny>0, "BuildLinearModel: NY<=0"); // // Handle degenerate case (N=0) // result = true; v = new double[ny, mxnx+1]; if( n==0 ) { for(j=0; j<=mxnx; j++) { for(i=0; i<=ny-1; i++) { v[i,j] = 0; } } return result; } // // Allocate temporaries // tmpy = new double[n]; // // General linear model. 
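// EDITOR'S NOTE (descriptive comment, not part of the original sources): the branch below fits
// the linear term in shifted/scaled variables t[j] = (x[j]-shifting[j])/scaling in order to keep
// the least-squares problem well conditioned, and then maps the coefficients back. Since
//     c[0]*t[0] + ... + c[mxnx-1]*t[mxnx-1] + c[mxnx]
//       = sum_j (c[j]/scaling)*x[j] + ( c[mxnx] - sum_j shifting[j]*(c[j]/scaling) ),
// the unscaled coefficients are V[i,j] = c[j]/scaling and the constant term is
// V[i,mxnx] = c[mxnx] - sum_j shifting[j]*V[i,j], which is exactly what the code computes.
// Finally the fitted linear part is subtracted from Y, so the RBF stage fits residuals only.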
// if( modeltype==1 ) { // // Calculate scaling/shifting, transform variables, prepare LLS problem // a = new double[n, mxnx+1]; shifting = new double[mxnx]; scaling = 0; for(i=0; i<=mxnx-1; i++) { mn = x[0,i]; mx = mn; for(j=1; j<=n-1; j++) { if( (double)(mn)>(double)(x[j,i]) ) { mn = x[j,i]; } if( (double)(mx)<(double)(x[j,i]) ) { mx = x[j,i]; } } scaling = Math.Max(scaling, mx-mn); shifting[i] = 0.5*(mx+mn); } if( (double)(scaling)==(double)(0) ) { scaling = 1; } else { scaling = 0.5*scaling; } for(i=0; i<=n-1; i++) { for(j=0; j<=mxnx-1; j++) { a[i,j] = (x[i,j]-shifting[j])/scaling; } } for(i=0; i<=n-1; i++) { a[i,mxnx] = 1; } // // Solve linear system in transformed variables, make backward // for(i=0; i<=ny-1; i++) { for(j=0; j<=n-1; j++) { tmpy[j] = y[j,i]; } lsfit.lsfitlinear(tmpy, a, n, mxnx+1, ref info, ref c, rep, _params); if( info<=0 ) { result = false; return result; } for(j=0; j<=mxnx-1; j++) { v[i,j] = c[j]/scaling; } v[i,mxnx] = c[mxnx]; for(j=0; j<=mxnx-1; j++) { v[i,mxnx] = v[i,mxnx]-shifting[j]*v[i,j]; } for(j=0; j<=n-1; j++) { for(k=0; k<=mxnx-1; k++) { y[j,i] = y[j,i]-x[j,k]*v[i,k]; } y[j,i] = y[j,i]-v[i,mxnx]; } } return result; } // // Constant model, very simple // if( modeltype==2 ) { for(i=0; i<=ny-1; i++) { for(j=0; j<=mxnx; j++) { v[i,j] = 0; } for(j=0; j<=n-1; j++) { v[i,mxnx] = v[i,mxnx]+y[j,i]; } if( n>0 ) { v[i,mxnx] = v[i,mxnx]/n; } for(j=0; j<=n-1; j++) { y[j,i] = y[j,i]-v[i,mxnx]; } } return result; } // // Zero model // alglib.ap.assert(modeltype==3, "BuildLinearModel: unknown model type"); for(i=0; i<=ny-1; i++) { for(j=0; j<=mxnx; j++) { v[i,j] = 0; } } return result; } private static void buildrbfmodellsqr(double[,] x, ref double[,] y, double[,] xc, double[] r, int n, int nc, int ny, nearestneighbor.kdtree pointstree, nearestneighbor.kdtree centerstree, double epsort, double epserr, int maxits, ref int gnnz, ref int snnz, ref double[,] w, ref int info, ref int iterationscount, ref int nmv, alglib.xparams _params) { linlsqr.linlsqrstate state = new linlsqr.linlsqrstate(); linlsqr.linlsqrreport lsqrrep = new linlsqr.linlsqrreport(); sparse.sparsematrix spg = new sparse.sparsematrix(); sparse.sparsematrix sps = new sparse.sparsematrix(); int[] nearcenterscnt = new int[0]; int[] nearpointscnt = new int[0]; int[] skipnearpointscnt = new int[0]; int[] farpointscnt = new int[0]; int maxnearcenterscnt = 0; int maxnearpointscnt = 0; int maxfarpointscnt = 0; int sumnearcenterscnt = 0; int sumnearpointscnt = 0; int sumfarpointscnt = 0; double maxrad = 0; int[] pointstags = new int[0]; int[] centerstags = new int[0]; double[,] nearpoints = new double[0,0]; double[,] nearcenters = new double[0,0]; double[,] farpoints = new double[0,0]; int tmpi = 0; int pointscnt = 0; int centerscnt = 0; double[] xcx = new double[0]; double[] tmpy = new double[0]; double[] tc = new double[0]; double[] g = new double[0]; double[] c = new double[0]; int i = 0; int j = 0; int k = 0; int sind = 0; double[,] a = new double[0,0]; double vv = 0; double vx = 0; double vy = 0; double vz = 0; double vr = 0; double gnorm2 = 0; double[] tmp0 = new double[0]; double[] tmp1 = new double[0]; double[] tmp2 = new double[0]; double fx = 0; double[,] xx = new double[0,0]; double[,] cx = new double[0,0]; double mrad = 0; int i_ = 0; gnnz = 0; snnz = 0; w = new double[0,0]; info = 0; iterationscount = 0; nmv = 0; // // Handle special cases: NC=0 // if( nc==0 ) { info = 1; iterationscount = 0; nmv = 0; return; } // // Prepare for general case, NC>0 // xcx = new double[mxnx]; pointstags = new int[n]; 
centerstags = new int[nc]; info = -1; iterationscount = 0; nmv = 0; // // This block prepares quantities used to compute approximate cardinal basis functions (ACBFs): // * NearCentersCnt[] - array[NC], whose elements store number of near centers used to build ACBF // * NearPointsCnt[] - array[NC], number of near points used to build ACBF // * FarPointsCnt[] - array[NC], number of far points (ones where ACBF is nonzero) // * MaxNearCentersCnt - max(NearCentersCnt) // * MaxNearPointsCnt - max(NearPointsCnt) // * SumNearCentersCnt - sum(NearCentersCnt) // * SumNearPointsCnt - sum(NearPointsCnt) // * SumFarPointsCnt - sum(FarPointsCnt) // nearcenterscnt = new int[nc]; nearpointscnt = new int[nc]; skipnearpointscnt = new int[nc]; farpointscnt = new int[nc]; maxnearcenterscnt = 0; maxnearpointscnt = 0; maxfarpointscnt = 0; sumnearcenterscnt = 0; sumnearpointscnt = 0; sumfarpointscnt = 0; for(i=0; i<=nc-1; i++) { for(j=0; j<=mxnx-1; j++) { xcx[j] = xc[i,j]; } // // Determine number of near centers and maximum radius of near centers // nearcenterscnt[i] = nearestneighbor.kdtreequeryrnn(centerstree, xcx, r[i]*rbfnearradius, true, _params); nearestneighbor.kdtreequeryresultstags(centerstree, ref centerstags, _params); maxrad = 0; for(j=0; j<=nearcenterscnt[i]-1; j++) { maxrad = Math.Max(maxrad, Math.Abs(r[centerstags[j]])); } // // Determine number of near points (ones which used to build ACBF) // and skipped points (the most near points which are NOT used to build ACBF // and are NOT included in the near points count // skipnearpointscnt[i] = nearestneighbor.kdtreequeryrnn(pointstree, xcx, 0.1*r[i], true, _params); nearpointscnt[i] = nearestneighbor.kdtreequeryrnn(pointstree, xcx, (r[i]+maxrad)*rbfnearradius, true, _params)-skipnearpointscnt[i]; alglib.ap.assert(nearpointscnt[i]>=0, "BuildRBFModelLSQR: internal error"); // // Determine number of far points // farpointscnt[i] = nearestneighbor.kdtreequeryrnn(pointstree, xcx, Math.Max(r[i]*rbfnearradius+maxrad*rbffarradius, r[i]*rbffarradius), true, _params); // // calculate sum and max, make some basic checks // alglib.ap.assert(nearcenterscnt[i]>0, "BuildRBFModelLSQR: internal error"); maxnearcenterscnt = Math.Max(maxnearcenterscnt, nearcenterscnt[i]); maxnearpointscnt = Math.Max(maxnearpointscnt, nearpointscnt[i]); maxfarpointscnt = Math.Max(maxfarpointscnt, farpointscnt[i]); sumnearcenterscnt = sumnearcenterscnt+nearcenterscnt[i]; sumnearpointscnt = sumnearpointscnt+nearpointscnt[i]; sumfarpointscnt = sumfarpointscnt+farpointscnt[i]; } snnz = sumnearcenterscnt; gnnz = sumfarpointscnt; alglib.ap.assert(maxnearcenterscnt>0, "BuildRBFModelLSQR: internal error"); // // Allocate temporaries. // // NOTE: we want to avoid allocation of zero-size arrays, so we // use max(desired_size,1) instead of desired_size when performing // memory allocation. 
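// EDITOR'S NOTE (descriptive comment, not part of the original sources): the loop below builds an
// approximate cardinal basis function (ACBF) preconditioner. For every center I it finds a small
// combination of the Gaussian bases of nearby centers which equals 1 at center I and is close to
// zero at the surrounding data points (a regularized least-squares problem solved with
// fblssolvels, followed by a projection step which enforces the value 1 at the center). The
// combination coefficients are stored in row I of the sparse matrix SpS, and the values of the
// resulting ACBF at the (far) data points are stored in column I of SpG. The interpolation system
// is then solved by LSQR in the ACBF basis, SpG*z = y, and the weights in the original Gaussian
// basis are recovered as W = SpS^T*z (the sparsemtv() call below). Working in the ACBF basis is
// intended to keep the system far better conditioned than the raw Gaussian collocation matrix.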
// a = new double[maxnearpointscnt+maxnearcenterscnt, maxnearcenterscnt]; tmpy = new double[maxnearpointscnt+maxnearcenterscnt]; g = new double[maxnearcenterscnt]; c = new double[maxnearcenterscnt]; nearcenters = new double[maxnearcenterscnt, mxnx]; nearpoints = new double[Math.Max(maxnearpointscnt, 1), mxnx]; farpoints = new double[Math.Max(maxfarpointscnt, 1), mxnx]; // // fill matrix SpG // sparse.sparsecreate(n, nc, gnnz, spg, _params); sparse.sparsecreate(nc, nc, snnz, sps, _params); for(i=0; i<=nc-1; i++) { centerscnt = nearcenterscnt[i]; // // main center // for(j=0; j<=mxnx-1; j++) { xcx[j] = xc[i,j]; } // // center's tree // tmpi = nearestneighbor.kdtreequeryknn(centerstree, xcx, centerscnt, true, _params); alglib.ap.assert(tmpi==centerscnt, "BuildRBFModelLSQR: internal error"); nearestneighbor.kdtreequeryresultsx(centerstree, ref cx, _params); nearestneighbor.kdtreequeryresultstags(centerstree, ref centerstags, _params); // // point's tree // mrad = 0; for(j=0; j<=centerscnt-1; j++) { mrad = Math.Max(mrad, r[centerstags[j]]); } // // we need to be sure that 'CTree' contains // at least one side center // sparse.sparseset(sps, i, i, 1, _params); c[0] = 1.0; for(j=1; j<=centerscnt-1; j++) { c[j] = 0.0; } if( centerscnt>1 && nearpointscnt[i]>0 ) { // // first KDTree request for points // pointscnt = nearpointscnt[i]; tmpi = nearestneighbor.kdtreequeryknn(pointstree, xcx, skipnearpointscnt[i]+nearpointscnt[i], true, _params); alglib.ap.assert(tmpi==skipnearpointscnt[i]+nearpointscnt[i], "BuildRBFModelLSQR: internal error"); nearestneighbor.kdtreequeryresultsx(pointstree, ref xx, _params); sind = skipnearpointscnt[i]; for(j=0; j<=pointscnt-1; j++) { vx = xx[sind+j,0]; vy = xx[sind+j,1]; vz = xx[sind+j,2]; for(k=0; k<=centerscnt-1; k++) { vr = 0.0; vv = vx-cx[k,0]; vr = vr+vv*vv; vv = vy-cx[k,1]; vr = vr+vv*vv; vv = vz-cx[k,2]; vr = vr+vv*vv; vv = r[centerstags[k]]; a[j,k] = Math.Exp(-(vr/(vv*vv))); } } for(j=0; j<=centerscnt-1; j++) { g[j] = Math.Exp(-((math.sqr(xcx[0]-cx[j,0])+math.sqr(xcx[1]-cx[j,1])+math.sqr(xcx[2]-cx[j,2]))/math.sqr(r[centerstags[j]]))); } // // calculate the problem // gnorm2 = 0.0; for(i_=0; i_<=centerscnt-1;i_++) { gnorm2 += g[i_]*g[i_]; } for(j=0; j<=pointscnt-1; j++) { vv = 0.0; for(i_=0; i_<=centerscnt-1;i_++) { vv += a[j,i_]*g[i_]; } vv = vv/gnorm2; tmpy[j] = -vv; for(i_=0; i_<=centerscnt-1;i_++) { a[j,i_] = a[j,i_] - vv*g[i_]; } } for(j=pointscnt; j<=pointscnt+centerscnt-1; j++) { for(k=0; k<=centerscnt-1; k++) { a[j,k] = 0.0; } a[j,j-pointscnt] = 1.0E-6; tmpy[j] = 0.0; } fbls.fblssolvels(ref a, ref tmpy, pointscnt+centerscnt, centerscnt, ref tmp0, ref tmp1, ref tmp2, _params); for(i_=0; i_<=centerscnt-1;i_++) { c[i_] = tmpy[i_]; } vv = 0.0; for(i_=0; i_<=centerscnt-1;i_++) { vv += g[i_]*c[i_]; } vv = vv/gnorm2; for(i_=0; i_<=centerscnt-1;i_++) { c[i_] = c[i_] - vv*g[i_]; } vv = 1/gnorm2; for(i_=0; i_<=centerscnt-1;i_++) { c[i_] = c[i_] + vv*g[i_]; } for(j=0; j<=centerscnt-1; j++) { sparse.sparseset(sps, i, centerstags[j], c[j], _params); } } // // second KDTree request for points // pointscnt = farpointscnt[i]; tmpi = nearestneighbor.kdtreequeryknn(pointstree, xcx, pointscnt, true, _params); alglib.ap.assert(tmpi==pointscnt, "BuildRBFModelLSQR: internal error"); nearestneighbor.kdtreequeryresultsx(pointstree, ref xx, _params); nearestneighbor.kdtreequeryresultstags(pointstree, ref pointstags, _params); // //fill SpG matrix // for(j=0; j<=pointscnt-1; j++) { fx = 0; vx = xx[j,0]; vy = xx[j,1]; vz = xx[j,2]; for(k=0; k<=centerscnt-1; k++) { vr = 0.0; vv = 
vx-cx[k,0]; vr = vr+vv*vv; vv = vy-cx[k,1]; vr = vr+vv*vv; vv = vz-cx[k,2]; vr = vr+vv*vv; vv = r[centerstags[k]]; vv = vv*vv; fx = fx+c[k]*Math.Exp(-(vr/vv)); } sparse.sparseset(spg, pointstags[j], i, fx, _params); } } sparse.sparseconverttocrs(spg, _params); sparse.sparseconverttocrs(sps, _params); // // solve by LSQR method // tmpy = new double[n]; tc = new double[nc]; w = new double[nc, ny]; linlsqr.linlsqrcreate(n, nc, state, _params); linlsqr.linlsqrsetcond(state, epsort, epserr, maxits, _params); for(i=0; i<=ny-1; i++) { for(j=0; j<=n-1; j++) { tmpy[j] = y[j,i]; } linlsqr.linlsqrsolvesparse(state, spg, tmpy, _params); linlsqr.linlsqrresults(state, ref c, lsqrrep, _params); if( lsqrrep.terminationtype<=0 ) { info = -4; return; } sparse.sparsemtv(sps, c, ref tc, _params); for(j=0; j<=nc-1; j++) { w[j,i] = tc[j]; } iterationscount = iterationscount+lsqrrep.iterationscount; nmv = nmv+lsqrrep.nmv; } info = 1; } private static void buildrbfmlayersmodellsqr(double[,] x, ref double[,] y, ref double[,] xc, double rval, ref double[] r, int n, ref int nc, int ny, int nlayers, nearestneighbor.kdtree centerstree, double epsort, double epserr, int maxits, double lambdav, ref int annz, ref double[,] w, ref int info, ref int iterationscount, ref int nmv, alglib.xparams _params) { linlsqr.linlsqrstate state = new linlsqr.linlsqrstate(); linlsqr.linlsqrreport lsqrrep = new linlsqr.linlsqrreport(); sparse.sparsematrix spa = new sparse.sparsematrix(); double anorm = 0; double[] omega = new double[0]; double[] xx = new double[0]; double[] tmpy = new double[0]; double[,] cx = new double[0,0]; double yval = 0; int nec = 0; int[] centerstags = new int[0]; int layer = 0; int i = 0; int j = 0; int k = 0; double v = 0; double rmaxbefore = 0; double rmaxafter = 0; xc = new double[0,0]; r = new double[0]; nc = 0; annz = 0; w = new double[0,0]; info = 0; iterationscount = 0; nmv = 0; alglib.ap.assert(nlayers>=0, "BuildRBFMLayersModelLSQR: invalid argument(NLayers<0)"); alglib.ap.assert(n>=0, "BuildRBFMLayersModelLSQR: invalid argument(N<0)"); alglib.ap.assert(mxnx>0 && mxnx<=3, "BuildRBFMLayersModelLSQR: internal error(invalid global const MxNX: either MxNX<=0 or MxNX>3)"); annz = 0; if( n==0 || nlayers==0 ) { info = 1; iterationscount = 0; nmv = 0; return; } nc = n*nlayers; xx = new double[mxnx]; centerstags = new int[n]; xc = new double[nc, mxnx]; r = new double[nc]; for(i=0; i<=nc-1; i++) { for(j=0; j<=mxnx-1; j++) { xc[i,j] = x[i%n,j]; } } for(i=0; i<=nc-1; i++) { r[i] = rval/Math.Pow(2, i/n); } for(i=0; i<=n-1; i++) { centerstags[i] = i; } nearestneighbor.kdtreebuildtagged(xc, centerstags, n, mxnx, 0, 2, centerstree, _params); omega = new double[n]; tmpy = new double[n]; w = new double[nc, ny]; info = -1; iterationscount = 0; nmv = 0; linlsqr.linlsqrcreate(n, n, state, _params); linlsqr.linlsqrsetcond(state, epsort, epserr, maxits, _params); linlsqr.linlsqrsetlambdai(state, 1.0E-6, _params); // // calculate number of non-zero elements for sparse matrix // for(i=0; i<=n-1; i++) { for(j=0; j<=mxnx-1; j++) { xx[j] = x[i,j]; } annz = annz+nearestneighbor.kdtreequeryrnn(centerstree, xx, r[0]*rbfmlradius, true, _params); } for(layer=0; layer<=nlayers-1; layer++) { // // Fill sparse matrix, calculate norm(A) // anorm = 0.0; sparse.sparsecreate(n, n, annz, spa, _params); for(i=0; i<=n-1; i++) { for(j=0; j<=mxnx-1; j++) { xx[j] = x[i,j]; } nec = nearestneighbor.kdtreequeryrnn(centerstree, xx, r[layer*n]*rbfmlradius, true, _params); nearestneighbor.kdtreequeryresultsx(centerstree, ref cx, _params); 
nearestneighbor.kdtreequeryresultstags(centerstree, ref centerstags, _params); for(j=0; j<=nec-1; j++) { v = Math.Exp(-((math.sqr(xx[0]-cx[j,0])+math.sqr(xx[1]-cx[j,1])+math.sqr(xx[2]-cx[j,2]))/math.sqr(r[layer*n+centerstags[j]]))); sparse.sparseset(spa, i, centerstags[j], v, _params); anorm = anorm+math.sqr(v); } } anorm = Math.Sqrt(anorm); sparse.sparseconverttocrs(spa, _params); // // Calculate maximum residual before adding new layer. // This value is not used by algorithm, the only purpose is to make debugging easier. // rmaxbefore = 0.0; for(j=0; j<=n-1; j++) { for(i=0; i<=ny-1; i++) { rmaxbefore = Math.Max(rmaxbefore, Math.Abs(y[j,i])); } } // // Process NY dimensions of the target function // for(i=0; i<=ny-1; i++) { for(j=0; j<=n-1; j++) { tmpy[j] = y[j,i]; } // // calculate Omega for current layer // linlsqr.linlsqrsetlambdai(state, lambdav*anorm/n, _params); linlsqr.linlsqrsolvesparse(state, spa, tmpy, _params); linlsqr.linlsqrresults(state, ref omega, lsqrrep, _params); if( lsqrrep.terminationtype<=0 ) { info = -4; return; } // // calculate error for current layer // for(j=0; j<=n-1; j++) { yval = 0; for(k=0; k<=mxnx-1; k++) { xx[k] = x[j,k]; } nec = nearestneighbor.kdtreequeryrnn(centerstree, xx, r[layer*n]*rbffarradius, true, _params); nearestneighbor.kdtreequeryresultsx(centerstree, ref cx, _params); nearestneighbor.kdtreequeryresultstags(centerstree, ref centerstags, _params); for(k=0; k<=nec-1; k++) { yval = yval+omega[centerstags[k]]*Math.Exp(-((math.sqr(xx[0]-cx[k,0])+math.sqr(xx[1]-cx[k,1])+math.sqr(xx[2]-cx[k,2]))/math.sqr(r[layer*n+centerstags[k]]))); } y[j,i] = y[j,i]-yval; } // // write Omega in out parameter W // for(j=0; j<=n-1; j++) { w[layer*n+j,i] = omega[j]; } iterationscount = iterationscount+lsqrrep.iterationscount; nmv = nmv+lsqrrep.nmv; } // // Calculate maximum residual after adding new layer. // This value is not used by algorithm, the only purpose is to make debugging easier. // rmaxafter = 0.0; for(j=0; j<=n-1; j++) { for(i=0; i<=ny-1; i++) { rmaxafter = Math.Max(rmaxafter, Math.Abs(y[j,i])); } } } info = 1; } } public class rbf { /************************************************************************* Buffer object which is used to perform evaluation requests in the multithreaded mode (multiple threads working with same RBF model object). This object should be created with rbfcreatecalcbuffer(). *************************************************************************/ public class rbfcalcbuffer : apobject { public int modelversion; public rbfv1.rbfv1calcbuffer bufv1; public rbfv2.rbfv2calcbuffer bufv2; public rbfcalcbuffer() { init(); } public override void init() { bufv1 = new rbfv1.rbfv1calcbuffer(); bufv2 = new rbfv2.rbfv2calcbuffer(); } public override alglib.apobject make_copy() { rbfcalcbuffer _result = new rbfcalcbuffer(); _result.modelversion = modelversion; _result.bufv1 = (rbfv1.rbfv1calcbuffer)bufv1.make_copy(); _result.bufv2 = (rbfv2.rbfv2calcbuffer)bufv2.make_copy(); return _result; } }; /************************************************************************* RBF model. Never try to directly work with fields of this object - always use ALGLIB functions to use this object. 
*************************************************************************/ public class rbfmodel : apobject { public int nx; public int ny; public int modelversion; public rbfv1.rbfv1model model1; public rbfv2.rbfv2model model2; public double lambdav; public double radvalue; public double radzvalue; public int nlayers; public int aterm; public int algorithmtype; public double epsort; public double epserr; public int maxits; public int nnmaxits; public int n; public double[,] x; public double[,] y; public bool hasscale; public double[] s; public int progress10000; public bool terminationrequest; public rbfmodel() { init(); } public override void init() { model1 = new rbfv1.rbfv1model(); model2 = new rbfv2.rbfv2model(); x = new double[0,0]; y = new double[0,0]; s = new double[0]; } public override alglib.apobject make_copy() { rbfmodel _result = new rbfmodel(); _result.nx = nx; _result.ny = ny; _result.modelversion = modelversion; _result.model1 = (rbfv1.rbfv1model)model1.make_copy(); _result.model2 = (rbfv2.rbfv2model)model2.make_copy(); _result.lambdav = lambdav; _result.radvalue = radvalue; _result.radzvalue = radzvalue; _result.nlayers = nlayers; _result.aterm = aterm; _result.algorithmtype = algorithmtype; _result.epsort = epsort; _result.epserr = epserr; _result.maxits = maxits; _result.nnmaxits = nnmaxits; _result.n = n; _result.x = (double[,])x.Clone(); _result.y = (double[,])y.Clone(); _result.hasscale = hasscale; _result.s = (double[])s.Clone(); _result.progress10000 = progress10000; _result.terminationrequest = terminationrequest; return _result; } }; /************************************************************************* RBF solution report: * TerminationType - termination type, positive values - success, non-positive - failure. Fields which are set by modern RBF solvers (hierarchical): * RMSError - root-mean-square error; NAN for old solvers (ML, QNN) * MaxError - maximum error; NAN for old solvers (ML, QNN) *************************************************************************/ public class rbfreport : apobject { public double rmserror; public double maxerror; public int arows; public int acols; public int annz; public int iterationscount; public int nmv; public int terminationtype; public rbfreport() { init(); } public override void init() { } public override alglib.apobject make_copy() { rbfreport _result = new rbfreport(); _result.rmserror = rmserror; _result.maxerror = maxerror; _result.arows = arows; _result.acols = acols; _result.annz = annz; _result.iterationscount = iterationscount; _result.nmv = nmv; _result.terminationtype = terminationtype; return _result; } }; public const double eps = 1.0E-6; public const double rbffarradius = 6; public const int rbffirstversion = 0; public const int rbfversion2 = 2; /************************************************************************* This function creates RBF model for a scalar (NY=1) or vector (NY>1) function in a NX-dimensional space (NX>=1). Newly created model is empty. It can be used for interpolation right after creation, but it just returns zeros. You have to add points to the model, tune interpolation settings, and then call model construction function rbfbuildmodel() which will update model according to your specification. USAGE: 1. User creates model with rbfcreate() 2. User adds dataset with rbfsetpoints() (points do NOT have to be on a regular grid) or rbfsetpointsandscales(). 3. 
(OPTIONAL) User chooses polynomial term by calling: * rbflinterm() to set linear term * rbfconstterm() to set constant term * rbfzeroterm() to set zero term By default, linear term is used. 4. User tweaks algorithm properties with rbfsetalgohierarchical() method (or chooses one of the legacy algorithms - QNN (rbfsetalgoqnn) or ML (rbfsetalgomultilayer)). 5. User calls rbfbuildmodel() function which rebuilds model according to the specification 6. User may call rbfcalc() to calculate model value at the specified point, rbfgridcalc() to calculate model values at the points of the regular grid. User may extract model coefficients with rbfunpack() call. IMPORTANT: we recommend you to use latest model construction algorithm - hierarchical RBFs, which is activated by rbfsetalgohierarchical() function. This algorithm is the fastest one, and most memory- efficient. However, it is incompatible with older versions of ALGLIB (pre-3.11). So, if you serialize hierarchical model, you will be unable to load it in pre-3.11 ALGLIB. Other model types (QNN and RBF-ML) are still backward-compatible. INPUT PARAMETERS: NX - dimension of the space, NX>=1 NY - function dimension, NY>=1 OUTPUT PARAMETERS: S - RBF model (initially equals to zero) NOTE 1: memory requirements. RBF models require amount of memory which is proportional to the number of data points. Some additional memory is allocated during model construction, but most of this memory is freed after model coefficients are calculated. Amount of this additional memory depends on model construction algorithm being used. NOTE 2: prior to ALGLIB version 3.11, RBF models supported only NX=2 or NX=3. Any attempt to create single-dimensional or more than 3-dimensional RBF model resulted in exception. ALGLIB 3.11 supports any NX>0, but models created with NX!=2 and NX!=3 are incompatible with (a) older versions of ALGLIB, (b) old model construction algorithms (QNN or RBF-ML). So, if you create a model with NX=2 or NX=3, then, depending on specific model construction algorithm being chosen, you will (QNN and RBF-ML) or will not (HierarchicalRBF) get backward compatibility with older versions of ALGLIB. You have a choice here. However, if you create a model with NX neither 2 nor 3, you have no backward compatibility from the start, and you are forced to use hierarchical RBFs and ALGLIB 3.11 or later. -- ALGLIB -- Copyright 13.12.2011, 20.06.2016 by Bochkanov Sergey *************************************************************************/ public static void rbfcreate(int nx, int ny, rbfmodel s, alglib.xparams _params) { alglib.ap.assert(nx>=1, "RBFCreate: NX<1"); alglib.ap.assert(ny>=1, "RBFCreate: NY<1"); s.nx = nx; s.ny = ny; rbfpreparenonserializablefields(s, _params); // // Select default model version according to NX. // // The idea is that when we call this function with NX=2 or NX=3, backward // compatible dummy (zero) V1 model is created, so serialization produces // model which are compatible with pre-3.11 ALGLIB. // initializev1(nx, ny, s.model1, _params); initializev2(nx, ny, s.model2, _params); if( nx==2 || nx==3 ) { s.modelversion = 1; } else { s.modelversion = 2; } // // Report fields // s.progress10000 = 0; s.terminationrequest = false; } /************************************************************************* This function creates buffer structure which can be used to perform parallel RBF model evaluations (with one RBF model instance being used from multiple threads, as long as different threads use different instances of buffer). 
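For illustration, a minimal usage sketch follows (editor's addition, not part of the original comment; it uses rbftscalcbuf(), described next, and assumes the standard public C# wrappers with their usual signatures):

    // C# sketch: evaluate one shared, already built alglib.rbfmodel ('model', NX=2, NY=1,
    // assumed given) from several threads, each thread using its own calculation buffer.
    double[,] queries = new double[1000, 2];              // points to evaluate (fill as needed)
    double[] results = new double[1000];
    System.Threading.Tasks.Parallel.For(0, 4, t =>        // 4 worker threads
    {
        alglib.rbfcalcbuffer buf;
        alglib.rbfcreatecalcbuffer(model, out buf);       // one buffer per thread
        double[] x = new double[2];
        double[] y = new double[0];
        for(int i=t; i<queries.GetLength(0); i+=4)        // static partitioning of the queries
        {
            x[0] = queries[i,0];
            x[1] = queries[i,1];
            alglib.rbftscalcbuf(model, buf, x, ref y);    // thread-safe evaluation
            results[i] = y[0];
        }
    });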
This buffer object can be used with rbftscalcbuf() function (here "ts" stands for "thread-safe", "buf" is a suffix which denotes function which reuses previously allocated output space). How to use it: * create RBF model structure with rbfcreate() * load data, tune parameters * call rbfbuildmodel() * call rbfcreatecalcbuffer(), once per thread working with RBF model (you should call this function only AFTER call to rbfbuildmodel(), see below for more information) * call rbftscalcbuf() from different threads, with each thread working with its own copy of buffer object. INPUT PARAMETERS S - RBF model OUTPUT PARAMETERS Buf - external buffer. IMPORTANT: buffer object should be used only with RBF model object which was used to initialize buffer. Any attempt to use buffer with different object is dangerous - you may get memory violation error because sizes of internal arrays do not fit to dimensions of RBF structure. IMPORTANT: you should call this function only for model which was built with rbfbuildmodel() function, after successful invocation of rbfbuildmodel(). Sizes of some internal structures are determined only after model is built, so buffer object created before model construction stage will be useless (and any attempt to use it will result in exception). -- ALGLIB -- Copyright 02.04.2016 by Sergey Bochkanov *************************************************************************/ public static void rbfcreatecalcbuffer(rbfmodel s, rbfcalcbuffer buf, alglib.xparams _params) { if( s.modelversion==1 ) { buf.modelversion = 1; rbfv1.rbfv1createcalcbuffer(s.model1, buf.bufv1, _params); return; } if( s.modelversion==2 ) { buf.modelversion = 2; rbfv2.rbfv2createcalcbuffer(s.model2, buf.bufv2, _params); return; } alglib.ap.assert(false, "RBFCreateCalcBuffer: integrity check failed"); } /************************************************************************* This function adds dataset. This function overrides results of the previous calls, i.e. multiple calls of this function will result in only the last set being added. IMPORTANT: ALGLIB version 3.11 and later allows you to specify a set of per-dimension scales. Interpolation radii are multiplied by the scale vector. It may be useful if you have mixed spatio-temporal data (say, a set of 3D slices recorded at different times). You should call rbfsetpointsandscales() function to use this feature. INPUT PARAMETERS: S - RBF model, initialized by rbfcreate() call. XY - points, array[N,NX+NY]. One row corresponds to one point in the dataset. First NX elements are coordinates, next NY elements are function values. Array may be larger than specified, in this case only leading [N,NX+NY] elements will be used. N - number of points in the dataset After you've added dataset and (optionally) tuned algorithm settings you should call rbfbuildmodel() in order to build a model for you. NOTE: dataset added by this function is not saved during model serialization. MODEL ITSELF is serialized, but data used to build it are not. So, if you 1) add dataset to empty RBF model, 2) serialize and unserialize it, then you will get an empty RBF model with no dataset being attached. From the other side, if you call rbfbuildmodel() between (1) and (2), then after (2) you will get your fully constructed RBF model - but again with no dataset attached, so subsequent calls to rbfbuildmodel() will produce empty model. 
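For illustration, here is the typical call sequence this function participates in (editor's sketch, not part of the original comment; it assumes the standard public C# wrappers documented in the ALGLIB Reference Manual):

    // C# sketch: dataset -> build -> evaluate for a 2D scalar function.
    double[,] xy = new double[,]
    {
        { 0.0, 0.0, 1.0 },    // columns: x0, x1, f(x0,x1)
        { 1.0, 0.0, 2.0 },
        { 0.0, 1.0, 3.0 },
        { 1.0, 1.0, 4.0 }
    };
    alglib.rbfmodel model;
    alglib.rbfreport rep;
    alglib.rbfcreate(2, 1, out model);                    // NX=2, NY=1
    alglib.rbfsetpoints(model, xy);                       // attach the dataset
    alglib.rbfsetalgohierarchical(model, 1.0, 3, 0.0);    // RBase=1.0, 3 layers, no smoothing
    alglib.rbfbuildmodel(model, out rep);                 // rep.terminationtype>0 on success
    double v = alglib.rbfcalc2(model, 0.5, 0.5);          // model value at (0.5, 0.5)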
-- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfsetpoints(rbfmodel s, double[,] xy, int n, alglib.xparams _params) { int i = 0; int j = 0; alglib.ap.assert(n>0, "RBFSetPoints: N<0"); alglib.ap.assert(alglib.ap.rows(xy)>=n, "RBFSetPoints: Rows(XY)=s.nx+s.ny, "RBFSetPoints: Cols(XY)0. After you've added dataset and (optionally) tuned algorithm settings you should call rbfbuildmodel() in order to build a model for you. NOTE: dataset added by this function is not saved during model serialization. MODEL ITSELF is serialized, but data used to build it are not. So, if you 1) add dataset to empty RBF model, 2) serialize and unserialize it, then you will get an empty RBF model with no dataset being attached. From the other side, if you call rbfbuildmodel() between (1) and (2), then after (2) you will get your fully constructed RBF model - but again with no dataset attached, so subsequent calls to rbfbuildmodel() will produce empty model. -- ALGLIB -- Copyright 20.06.2016 by Bochkanov Sergey *************************************************************************/ public static void rbfsetpointsandscales(rbfmodel r, double[,] xy, int n, double[] s, alglib.xparams _params) { int i = 0; int j = 0; alglib.ap.assert(n>0, "RBFSetPointsAndScales: N<0"); alglib.ap.assert(alglib.ap.rows(xy)>=n, "RBFSetPointsAndScales: Rows(XY)=r.nx+r.ny, "RBFSetPointsAndScales: Cols(XY)=r.nx, "RBFSetPointsAndScales: Length(S)(double)(0), "RBFSetPointsAndScales: S[i]<=0"); r.s[i] = s[i]; } } /************************************************************************* DEPRECATED:since version 3.11 ALGLIB includes new RBF model construction algorithm, Hierarchical RBF. This algorithm is faster and requires less memory than QNN and RBF-ML. It is especially good for large-scale interpolation problems. So, we recommend you to consider Hierarchical RBF as default option. ========================================================================== This function sets RBF interpolation algorithm. ALGLIB supports several RBF algorithms with different properties. This algorithm is called RBF-QNN and it is good for point sets with following properties: a) all points are distinct b) all points are well separated. c) points distribution is approximately uniform. There is no "contour lines", clusters of points, or other small-scale structures. Algorithm description: 1) interpolation centers are allocated to data points 2) interpolation radii are calculated as distances to the nearest centers times Q coefficient (where Q is a value from [0.75,1.50]). 3) after performing (2) radii are transformed in order to avoid situation when single outlier has very large radius and influences many points across all dataset. Transformation has following form: new_r[i] = min(r[i],Z*median(r[])) where r[i] is I-th radius, median() is a median radius across entire dataset, Z is user-specified value which controls amount of deviation from median radius. When (a) is violated, we will be unable to build RBF model. When (b) or (c) are violated, model will be built, but interpolation quality will be low. See http://www.alglib.net/interpolation/ for more information on this subject. This algorithm is used by default. 
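For illustration (editor's sketch of steps 2-3 above, not the library's actual code), the radius selection rule can be written as:

    // C# sketch: given per-center distances to the nearest other center, compute QNN radii as
    //     r[i] = Q*distToNearest[i],  then  r[i] = min(r[i], Z*median(r)).
    static double[] QnnRadii(double[] distToNearest, double q, double z)
    {
        int n = distToNearest.Length;
        double[] r = new double[n];
        for(int i=0; i<n; i++)
            r[i] = q*distToNearest[i];
        double[] sorted = (double[])r.Clone();
        Array.Sort(sorted);
        double median = sorted[n/2];                      // same median convention as the code
        for(int i=0; i<n; i++)
            r[i] = Math.Min(r[i], z*median);
        return r;
    }

A typical call through the public interface is then alglib.rbfsetalgoqnn(model, 1.0, 5.0), using the recommended Q and Z described below.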
Additional Q parameter controls smoothness properties of the RBF basis: * Q<0.75 will give perfectly conditioned basis, but terrible smoothness properties (RBF interpolant will have sharp peaks around function values) * Q around 1.0 gives good balance between smoothness and condition number * Q>1.5 will lead to badly conditioned systems and slow convergence of the underlying linear solver (although smoothness will be very good) * Q>2.0 will effectively make optimizer useless because it won't converge within reasonable amount of iterations. It is possible to set such large Q, but it is advised not to do so. INPUT PARAMETERS: S - RBF model, initialized by RBFCreate() call Q - Q parameter, Q>0, recommended value - 1.0 Z - Z parameter, Z>0, recommended value - 5.0 NOTE: this function has some serialization-related subtleties. We recommend you to study serialization examples from ALGLIB Reference Manual if you want to perform serialization of your models. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfsetalgoqnn(rbfmodel s, double q, double z, alglib.xparams _params) { alglib.ap.assert(math.isfinite(q), "RBFSetAlgoQNN: Q is infinite or NAN"); alglib.ap.assert((double)(q)>(double)(0), "RBFSetAlgoQNN: Q<=0"); alglib.ap.assert(math.isfinite(z), "RBFSetAlgoQNN: Z is infinite or NAN"); alglib.ap.assert((double)(z)>(double)(0), "RBFSetAlgoQNN: Z<=0"); s.radvalue = q; s.radzvalue = z; s.algorithmtype = 1; } /************************************************************************* DEPRECATED:since version 3.11 ALGLIB includes new RBF model construction algorithm, Hierarchical RBF. This algorithm is faster and requires less memory than QNN and RBF-ML. It is especially good for large-scale interpolation problems. So, we recommend you to consider Hierarchical RBF as default option. ========================================================================== This function sets RBF interpolation algorithm. ALGLIB supports several RBF algorithms with different properties. This algorithm is called RBF-ML. It builds multilayer RBF model, i.e. model with subsequently decreasing radii, which allows us to combine smoothness (due to large radii of the first layers) with exactness (due to small radii of the last layers) and fast convergence. Internally RBF-ML uses many different means of acceleration, from sparse matrices to KD-trees, which results in algorithm whose working time is roughly proportional to N*log(N)*Density*RBase^2*NLayers, where N is a number of points, Density is an average density if points per unit of the interpolation space, RBase is an initial radius, NLayers is a number of layers. RBF-ML is good for following kinds of interpolation problems: 1. "exact" problems (perfect fit) with well separated points 2. least squares problems with arbitrary distribution of points (algorithm gives perfect fit where it is possible, and resorts to least squares fit in the hard areas). 3. noisy problems where we want to apply some controlled amount of smoothing. INPUT PARAMETERS: S - RBF model, initialized by RBFCreate() call RBase - RBase parameter, RBase>0 NLayers - NLayers parameter, NLayers>0, recommended value to start with - about 5. LambdaV - regularization value, can be useful when solving problem in the least squares sense. Optimal lambda is problem- dependent and require trial and error. In our experience, good lambda can be as large as 0.1, and you can use 0.001 as initial guess. 
Default value - 0.01, which is used when LambdaV is not given. You can specify zero value, but it is not recommended to do so. TUNING ALGORITHM In order to use this algorithm you have to choose three parameters: * initial radius RBase * number of layers in the model NLayers * regularization coefficient LambdaV Initial radius is easy to choose - you can pick any number several times larger than the average distance between points. Algorithm won't break down if you choose radius which is too large (model construction time will increase, but model will be built correctly). Choose such number of layers that RLast=RBase/2^(NLayers-1) (radius used by the last layer) will be smaller than the typical distance between points. In case model error is too large, you can increase number of layers. Having more layers will make model construction and evaluation proportionally slower, but it will allow you to have model which precisely fits your data. From the other side, if you want to suppress noise, you can DECREASE number of layers to make your model less flexible. Regularization coefficient LambdaV controls smoothness of the individual models built for each layer. We recommend you to use default value in case you don't want to tune this parameter, because having non-zero LambdaV accelerates and stabilizes internal iterative algorithm. In case you want to suppress noise you can use LambdaV as additional parameter (larger value = more smoothness) to tune. TYPICAL ERRORS 1. Using initial radius which is too large. Memory requirements of the RBF-ML are roughly proportional to N*Density*RBase^2 (where Density is an average density of points per unit of the interpolation space). In the extreme case of the very large RBase we will need O(N^2) units of memory - and many layers in order to decrease radius to some reasonably small value. 2. Using too small number of layers - RBF models with large radius are not flexible enough to reproduce small variations in the target function. You need many layers with different radii, from large to small, in order to have good model. 3. Using initial radius which is too small. You will get model with "holes" in the areas which are too far away from interpolation centers. However, algorithm will work correctly (and quickly) in this case. 4. Using too many layers - you will get too large and too slow model. This model will perfectly reproduce your function, but maybe you will be able to achieve similar results with fewer layers (and less memory). -- ALGLIB -- Copyright 02.03.2012 by Bochkanov Sergey *************************************************************************/ public static void rbfsetalgomultilayer(rbfmodel s, double rbase, int nlayers, double lambdav, alglib.xparams _params) { alglib.ap.assert(math.isfinite(rbase), "RBFSetAlgoMultiLayer: RBase is infinite or NaN"); alglib.ap.assert((double)(rbase)>(double)(0), "RBFSetAlgoMultiLayer: RBase<=0"); alglib.ap.assert(nlayers>=0, "RBFSetAlgoMultiLayer: NLayers<0"); alglib.ap.assert(math.isfinite(lambdav), "RBFSetAlgoMultiLayer: LambdaV is infinite or NAN"); alglib.ap.assert((double)(lambdav)>=(double)(0), "RBFSetAlgoMultiLayer: LambdaV<0"); s.radvalue = rbase; s.nlayers = nlayers; s.algorithmtype = 2; s.lambdav = lambdav; } /************************************************************************* This function sets RBF interpolation algorithm. ALGLIB supports several RBF algorithms with different properties. This algorithm is called Hierarchical RBF. It is similar to its previous incarnation, RBF-ML, i.e. 
it also builds a sequence of models with decreasing radii. However, it uses more economical way of building upper layers (ones with large radii), which results in faster model construction and evaluation, as well as smaller memory footprint during construction. This algorithm has following important features: * ability to handle millions of points * controllable smoothing via nonlinearity penalization * support for NX-dimensional models with NX=1 or NX>3 (unlike QNN or RBF-ML) * support for specification of per-dimensional radii via scale vector, which is set by means of rbfsetpointsandscales() function. This feature is useful if you solve spatio-temporal interpolation problems, where different radii are required for spatial and temporal dimensions. Running times are roughly proportional to: * N*log(N)*NLayers - for model construction * N*NLayers - for model evaluation You may see that running time does not depend on search radius or points density, just on number of layers in the hierarchy. IMPORTANT: this model construction algorithm was introduced in ALGLIB 3.11 and produces models which are INCOMPATIBLE with previous versions of ALGLIB. You can not unserialize models produced with this function in ALGLIB 3.10 or earlier. INPUT PARAMETERS: S - RBF model, initialized by rbfcreate() call RBase - RBase parameter, RBase>0 NLayers - NLayers parameter, NLayers>0, recommended value to start with - about 5. LambdaNS- >=0, nonlinearity penalty coefficient, negative values are not allowed. This parameter adds controllable smoothing to the problem, which may reduce noise. Specification of non- zero lambda means that in addition to fitting error solver will also minimize LambdaNS*|S''(x)|^2 (appropriately generalized to multiple dimensions. Specification of exactly zero value means that no penalty is added (we do not even evaluate matrix of second derivatives which is necessary for smoothing). Calculation of nonlinearity penalty is costly - it results in several-fold increase of model construction time. Evaluation time remains the same. Optimal lambda is problem-dependent and requires trial and error. Good value to start from is 1e-5...1e-6, which corresponds to slightly noticeable smoothing of the function. Value 1e-2 usually means that quite heavy smoothing is applied. TUNING ALGORITHM In order to use this algorithm you have to choose three parameters: * initial radius RBase * number of layers in the model NLayers * penalty coefficient LambdaNS Initial radius is easy to choose - you can pick any number several times larger than the average distance between points. Algorithm won't break down if you choose radius which is too large (model construction time will increase, but model will be built correctly). Choose such number of layers that RLast=RBase/2^(NLayers-1) (radius used by the last layer) will be smaller than the typical distance between points. In case model error is too large, you can increase number of layers. Having more layers will make model construction and evaluation proportionally slower, but it will allow you to have model which precisely fits your data. From the other side, if you want to suppress noise, you can DECREASE number of layers to make your model less flexible (or specify non-zero LambdaNS). TYPICAL ERRORS 1. Using too small number of layers - RBF models with large radius are not flexible enough to reproduce small variations in the target function. You need many layers with different radii, from large to small, in order to have good model. 2. 
Using initial radius which is too small. You will get model with "holes" in the areas which are too far away from interpolation centers. However, algorithm will work correctly (and quickly) in this case. -- ALGLIB -- Copyright 20.06.2016 by Bochkanov Sergey *************************************************************************/ public static void rbfsetalgohierarchical(rbfmodel s, double rbase, int nlayers, double lambdans, alglib.xparams _params) { alglib.ap.assert(math.isfinite(rbase), "RBFSetAlgoHierarchical: RBase is infinite or NaN"); alglib.ap.assert((double)(rbase)>(double)(0), "RBFSetAlgoHierarchical: RBase<=0"); alglib.ap.assert(nlayers>=0, "RBFSetAlgoHierarchical: NLayers<0"); alglib.ap.assert(math.isfinite(lambdans) && (double)(lambdans)>=(double)(0), "RBFSetAlgoHierarchical: LambdaNS<0 or infinite"); s.radvalue = rbase; s.nlayers = nlayers; s.algorithmtype = 3; s.lambdav = lambdans; } /************************************************************************* This function sets linear term (model is a sum of radial basis functions plus linear polynomial). This function won't have effect until next call to RBFBuildModel(). INPUT PARAMETERS: S - RBF model, initialized by RBFCreate() call NOTE: this function has some serialization-related subtleties. We recommend you to study serialization examples from ALGLIB Reference Manual if you want to perform serialization of your models. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfsetlinterm(rbfmodel s, alglib.xparams _params) { s.aterm = 1; } /************************************************************************* This function sets constant term (model is a sum of radial basis functions plus constant). This function won't have effect until next call to RBFBuildModel(). INPUT PARAMETERS: S - RBF model, initialized by RBFCreate() call NOTE: this function has some serialization-related subtleties. We recommend you to study serialization examples from ALGLIB Reference Manual if you want to perform serialization of your models. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfsetconstterm(rbfmodel s, alglib.xparams _params) { s.aterm = 2; } /************************************************************************* This function sets zero term (model is a sum of radial basis functions without polynomial term). This function won't have effect until next call to RBFBuildModel(). INPUT PARAMETERS: S - RBF model, initialized by RBFCreate() call NOTE: this function has some serialization-related subtleties. We recommend you to study serialization examples from ALGLIB Reference Manual if you want to perform serialization of your models. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfsetzeroterm(rbfmodel s, alglib.xparams _params) { s.aterm = 3; } /************************************************************************* This function sets basis function type, which can be: * 0 for classic Gaussian * 1 for fast and compact bell-like basis function, which becomes exactly zero at distance equal to 3*R (default option). 
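A short configuration sketch (it assumes the public alglib.* wrappers which are generated elsewhere in this package, such as rbfcreate() and rbfsetalgohierarchical(); their exact signatures are an assumption of this sketch, not a guarantee):

    alglib.rbfmodel model;
    alglib.rbfcreate(2, 1, out model);                  // NX=2, NY=1
    alglib.rbfsetalgohierarchical(model, 1.0, 5, 0.0);  // hierarchical (version 2) model
    alglib.rbfsetv2bf(model, 0);                        // switch to the classic Gaussian basis
    // BF=1 (the compact basis, default) is usually faster because every basis
    // function becomes exactly zero at distance 3*R from its center
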
INPUT PARAMETERS: S - RBF model, initialized by RBFCreate() call BF - basis function type: * 0 - classic Gaussian * 1 - fast and compact one -- ALGLIB -- Copyright 01.02.2017 by Bochkanov Sergey *************************************************************************/ public static void rbfsetv2bf(rbfmodel s, int bf, alglib.xparams _params) { alglib.ap.assert(bf==0 || bf==1, "RBFSetV2Its: BF<>0 and BF<>1"); s.model2.basisfunction = bf; } /************************************************************************* This function sets stopping criteria of the underlying linear solver for hierarchical (version 2) RBF constructor. INPUT PARAMETERS: S - RBF model, initialized by RBFCreate() call MaxIts - this criterion will stop algorithm after MaxIts iterations. Typically a few hundreds iterations is required, with 400 being a good default value to start experimentation. Zero value means that default value will be selected. -- ALGLIB -- Copyright 01.02.2017 by Bochkanov Sergey *************************************************************************/ public static void rbfsetv2its(rbfmodel s, int maxits, alglib.xparams _params) { alglib.ap.assert(maxits>=0, "RBFSetV2Its: MaxIts is negative"); s.model2.maxits = maxits; } /************************************************************************* This function sets support radius parameter of hierarchical (version 2) RBF constructor. Hierarchical RBF model achieves great speed-up by removing from the model excessive (too dense) nodes. Say, if you have RBF radius equal to 1 meter, and two nodes are just 1 millimeter apart, you may remove one of them without reducing model quality. Support radius parameter is used to justify which points need removal, and which do not. If two points are less than SUPPORT_R*CUR_RADIUS units of distance apart, one of them is removed from the model. The larger support radius is, the faster model construction AND evaluation are. However, too large values result in "bumpy" models. INPUT PARAMETERS: S - RBF model, initialized by RBFCreate() call R - support radius coefficient, >=0. Recommended values are [0.1,0.4] range, with 0.1 being default value. -- ALGLIB -- Copyright 01.02.2017 by Bochkanov Sergey *************************************************************************/ public static void rbfsetv2supportr(rbfmodel s, double r, alglib.xparams _params) { alglib.ap.assert(math.isfinite(r), "RBFSetV2SupportR: R is not finite"); alglib.ap.assert((double)(r)>=(double)(0), "RBFSetV2SupportR: R<0"); s.model2.supportr = r; } /************************************************************************* This function sets stopping criteria of the underlying linear solver. INPUT PARAMETERS: S - RBF model, initialized by RBFCreate() call EpsOrt - orthogonality stopping criterion, EpsOrt>=0. Algorithm will stop when ||A'*r||<=EpsOrt where A' is a transpose of the system matrix, r is a residual vector. Recommended value of EpsOrt is equal to 1E-6. This criterion will stop algorithm when we have "bad fit" situation, i.e. when we should stop in a point with large, nonzero residual. EpsErr - residual stopping criterion. Algorithm will stop when ||r||<=EpsErr*||b||, where r is a residual vector, b is a right part of the system (function values). Recommended value of EpsErr is equal to 1E-3 or 1E-6. This criterion will stop algorithm in a "good fit" situation when we have near-zero residual near the desired solution. MaxIts - this criterion will stop algorithm after MaxIts iterations. It should be used for debugging purposes only! 
Zero MaxIts means that no limit is placed on the number of iterations. We recommend to set moderate non-zero values EpsOrt and EpsErr simultaneously. Values equal to 10E-6 are good to start with. In case you need high performance and do not need high precision , you may decrease EpsErr down to 0.001. However, we do not recommend decreasing EpsOrt. As for MaxIts, we recommend to leave it zero unless you know what you do. NOTE: this function has some serialization-related subtleties. We recommend you to study serialization examples from ALGLIB Reference Manual if you want to perform serialization of your models. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfsetcond(rbfmodel s, double epsort, double epserr, int maxits, alglib.xparams _params) { alglib.ap.assert(math.isfinite(epsort) && (double)(epsort)>=(double)(0), "RBFSetCond: EpsOrt is negative, INF or NAN"); alglib.ap.assert(math.isfinite(epserr) && (double)(epserr)>=(double)(0), "RBFSetCond: EpsB is negative, INF or NAN"); alglib.ap.assert(maxits>=0, "RBFSetCond: MaxIts is negative"); if( ((double)(epsort)==(double)(0) && (double)(epserr)==(double)(0)) && maxits==0 ) { s.epsort = eps; s.epserr = eps; s.maxits = 0; } else { s.epsort = epsort; s.epserr = epserr; s.maxits = maxits; } } /************************************************************************* This function builds RBF model and returns report (contains some information which can be used for evaluation of the algorithm properties). Call to this function modifies RBF model by calculating its centers/radii/ weights and saving them into RBFModel structure. Initially RBFModel contain zero coefficients, but after call to this function we will have coefficients which were calculated in order to fit our dataset. After you called this function you can call RBFCalc(), RBFGridCalc() and other model calculation functions. INPUT PARAMETERS: S - RBF model, initialized by RBFCreate() call Rep - report: * Rep.TerminationType: * -5 - non-distinct basis function centers were detected, interpolation aborted; only QNN returns this error code, other algorithms can handle non- distinct nodes. * -4 - nonconvergence of the internal SVD solver * -3 incorrect model construction algorithm was chosen: QNN or RBF-ML, combined with one of the incompatible features - NX=1 or NX>3; points with per-dimension scales. * 1 - successful termination * 8 - a termination request was submitted via rbfrequesttermination() function. Fields which are set only by modern RBF solvers (hierarchical or nonnegative; older solvers like QNN and ML initialize these fields by NANs): * rep.rmserror - root-mean-square error at nodes * rep.maxerror - maximum error at nodes Fields are used for debugging purposes: * Rep.IterationsCount - iterations count of the LSQR solver * Rep.NMV - number of matrix-vector products * Rep.ARows - rows count for the system matrix * Rep.ACols - columns count for the system matrix * Rep.ANNZ - number of significantly non-zero elements (elements above some algorithm-determined threshold) NOTE: failure to build model will leave current state of the structure unchanged. 
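USAGE SKETCH: a minimal end-to-end construction example. It assumes the public alglib.* wrappers generated elsewhere in this package (rbfcreate, rbfsetpoints, rbfsetalgohierarchical, rbfbuildmodel, rbfcalc2); the exact out-parameter conventions of those wrappers are an assumption of the sketch:

    alglib.rbfmodel model;
    alglib.rbfreport rep;
    alglib.rbfcreate(2, 1, out model);                    // NX=2, NY=1
    double[,] xy = new double[,]{{-1,0,2},{+1,0,3}};      // rows are (x0,x1,f)
    alglib.rbfsetpoints(model, xy);
    alglib.rbfsetalgohierarchical(model, 1.0, 3, 0.0);    // RBase=1.0, NLayers=3, no penalty
    alglib.rbfbuildmodel(model, out rep);                 // rep.terminationtype>0 on success
    double v = alglib.rbfcalc2(model, 0.0, 0.0);          // evaluate the fitted model
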
-- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfbuildmodel(rbfmodel s, rbfreport rep, alglib.xparams _params) { rbfv1.rbfv1report rep1 = new rbfv1.rbfv1report(); rbfv2.rbfv2report rep2 = new rbfv2.rbfv2report(); double[,] x3 = new double[0,0]; double[] scalevec = new double[0]; int i = 0; int curalgorithmtype = 0; // // Clean fields prior to processing // clearreportfields(rep, _params); s.progress10000 = 0; s.terminationrequest = false; // // Autoselect algorithm // if( s.algorithmtype==0 ) { if( (s.nx<2 || s.nx>3) || s.hasscale ) { curalgorithmtype = 3; } else { curalgorithmtype = 1; } } else { curalgorithmtype = s.algorithmtype; } // // Algorithms which generate V1 models // if( curalgorithmtype==1 || curalgorithmtype==2 ) { // // Perform compatibility checks // if( (s.nx<2 || s.nx>3) || s.hasscale ) { rep.terminationtype = -3; return; } // // Try to build model. // // NOTE: due to historical reasons RBFV1BuildModel() accepts points // cast to 3-dimensional space, even if they are really 2-dimensional. // So, for 2D data we have to explicitly convert them to 3D. // if( s.nx==2 ) { // // Convert data to 3D // apserv.rmatrixsetlengthatleast(ref x3, s.n, 3, _params); for(i=0; i<=s.n-1; i++) { x3[i,0] = s.x[i,0]; x3[i,1] = s.x[i,1]; x3[i,2] = 0; } rbfv1.rbfv1buildmodel(x3, s.y, s.n, s.aterm, curalgorithmtype, s.nlayers, s.radvalue, s.radzvalue, s.lambdav, s.epsort, s.epserr, s.maxits, s.model1, rep1, _params); } else { // // Work with raw data // rbfv1.rbfv1buildmodel(s.x, s.y, s.n, s.aterm, curalgorithmtype, s.nlayers, s.radvalue, s.radzvalue, s.lambdav, s.epsort, s.epserr, s.maxits, s.model1, rep1, _params); } s.modelversion = 1; // // Convert report fields // rep.arows = rep1.arows; rep.acols = rep1.acols; rep.annz = rep1.annz; rep.iterationscount = rep1.iterationscount; rep.nmv = rep1.nmv; rep.terminationtype = rep1.terminationtype; // // Done // return; } // // Algorithms which generate V2 models // if( curalgorithmtype==3 ) { // // Prepare scale vector - use unit values or user supplied ones // scalevec = new double[s.nx]; for(i=0; i<=s.nx-1; i++) { if( s.hasscale ) { scalevec[i] = s.s[i]; } else { scalevec[i] = 1; } } // // Build model // rbfv2.rbfv2buildhierarchical(s.x, s.y, s.n, scalevec, s.aterm, s.nlayers, s.radvalue, s.lambdav, s.model2, ref s.progress10000, ref s.terminationrequest, rep2, _params); s.modelversion = 2; // // Convert report fields // rep.terminationtype = rep2.terminationtype; rep.rmserror = rep2.rmserror; rep.maxerror = rep2.maxerror; // // Done // return; } // // Critical error // alglib.ap.assert(false, "RBFBuildModel: integrity check failure"); } /************************************************************************* This function calculates values of the RBF model in the given point. IMPORTANT: this function works only with modern (hierarchical) RBFs. It can not be used with legacy (version 1) RBFs because older RBF code does not support 1-dimensional models. This function should be used when we have NY=1 (scalar function) and NX=1 (1-dimensional space). If you have 3-dimensional space, use rbfcalc3(). If you have 2-dimensional space, use rbfcalc3(). If you have general situation (NX-dimensional space, NY-dimensional function) you should use generic rbfcalc(). If you want to perform parallel model evaluation from multiple threads, use rbftscalcbuf() with per-thread buffer object. 
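A minimal sketch of the 1-dimensional case (public alglib.* wrappers assumed, with conventional out-parameter signatures; treat them as an assumption of the sketch):

    alglib.rbfmodel model;
    alglib.rbfreport rep;
    alglib.rbfcreate(1, 1, out model);                    // NX=1, NY=1
    double[,] xy = new double[,]{{0,0},{1,1},{2,4}};      // rows are (x, f(x))
    alglib.rbfsetpoints(model, xy);
    alglib.rbfsetalgohierarchical(model, 1.0, 4, 0.0);    // hierarchical solver handles NX=1
    alglib.rbfbuildmodel(model, out rep);
    double v = alglib.rbfcalc1(model, 1.5);               // value of the model at x=1.5
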
This function returns 0.0 when: * model is not initialized * NX<>1 * NY<>1 INPUT PARAMETERS: S - RBF model X0 - X-coordinate, finite number RESULT: value of the model or 0.0 (as defined above) -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static double rbfcalc1(rbfmodel s, double x0, alglib.xparams _params) { double result = 0; alglib.ap.assert(math.isfinite(x0), "RBFCalc1: invalid value for X0 (X0 is Inf)!"); result = 0; if( s.ny!=1 || s.nx!=1 ) { return result; } if( s.modelversion==1 ) { result = 0; return result; } if( s.modelversion==2 ) { result = rbfv2.rbfv2calc1(s.model2, x0, _params); return result; } alglib.ap.assert(false, "RBFCalc1: integrity check failed"); return result; } /************************************************************************* This function calculates values of the RBF model in the given point. This function should be used when we have NY=1 (scalar function) and NX=2 (2-dimensional space). If you have 3-dimensional space, use rbfcalc3(). If you have general situation (NX-dimensional space, NY-dimensional function) you should use generic rbfcalc(). If you want to calculate function values many times, consider using rbfgridcalc2v(), which is far more efficient than many subsequent calls to rbfcalc2(). If you want to perform parallel model evaluation from multiple threads, use rbftscalcbuf() with per-thread buffer object. This function returns 0.0 when: * model is not initialized * NX<>2 *NY<>1 INPUT PARAMETERS: S - RBF model X0 - first coordinate, finite number X1 - second coordinate, finite number RESULT: value of the model or 0.0 (as defined above) -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static double rbfcalc2(rbfmodel s, double x0, double x1, alglib.xparams _params) { double result = 0; alglib.ap.assert(math.isfinite(x0), "RBFCalc2: invalid value for X0 (X0 is Inf)!"); alglib.ap.assert(math.isfinite(x1), "RBFCalc2: invalid value for X1 (X1 is Inf)!"); result = 0; if( s.ny!=1 || s.nx!=2 ) { return result; } if( s.modelversion==1 ) { result = rbfv1.rbfv1calc2(s.model1, x0, x1, _params); return result; } if( s.modelversion==2 ) { result = rbfv2.rbfv2calc2(s.model2, x0, x1, _params); return result; } alglib.ap.assert(false, "RBFCalc2: integrity check failed"); return result; } /************************************************************************* This function calculates value of the RBF model in the given point. This function should be used when we have NY=1 (scalar function) and NX=3 (3-dimensional space). If you have 2-dimensional space, use rbfcalc2(). If you have general situation (NX-dimensional space, NY-dimensional function) you should use generic rbfcalc(). If you want to calculate function values many times, consider using rbfgridcalc3v(), which is far more efficient than many subsequent calls to rbfcalc3(). If you want to perform parallel model evaluation from multiple threads, use rbftscalcbuf() with per-thread buffer object. 
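For example (a sketch only; the rbfcalcbuffer type and its rbfcreatecalcbuffer() constructor are assumed to be declared elsewhere in this package, and the ref-parameter convention of the rbftscalcbuf() wrapper is likewise an assumption):

    // 'model' is an rbfmodel which has already been built with rbfbuildmodel()
    // and is used strictly read-only by the worker threads
    alglib.rbfcalcbuffer buf;
    alglib.rbfcreatecalcbuffer(model, out buf);           // one buffer per worker thread
    double[] x = new double[]{0.0, 0.0, 0.0};
    double[] y = new double[0];
    alglib.rbftscalcbuf(model, buf, x, ref y);            // thread-safe evaluation
    // y[0] now holds the model value at x
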
This function returns 0.0 when: * model is not initialized * NX<>3 *NY<>1 INPUT PARAMETERS: S - RBF model X0 - first coordinate, finite number X1 - second coordinate, finite number X2 - third coordinate, finite number RESULT: value of the model or 0.0 (as defined above) -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static double rbfcalc3(rbfmodel s, double x0, double x1, double x2, alglib.xparams _params) { double result = 0; alglib.ap.assert(math.isfinite(x0), "RBFCalc3: invalid value for X0 (X0 is Inf or NaN)!"); alglib.ap.assert(math.isfinite(x1), "RBFCalc3: invalid value for X1 (X1 is Inf or NaN)!"); alglib.ap.assert(math.isfinite(x2), "RBFCalc3: invalid value for X2 (X2 is Inf or NaN)!"); result = 0; if( s.ny!=1 || s.nx!=3 ) { return result; } if( s.modelversion==1 ) { result = rbfv1.rbfv1calc3(s.model1, x0, x1, x2, _params); return result; } if( s.modelversion==2 ) { result = rbfv2.rbfv2calc3(s.model2, x0, x1, x2, _params); return result; } alglib.ap.assert(false, "RBFCalc3: integrity check failed"); return result; } /************************************************************************* This function calculates values of the RBF model at the given point. This is general function which can be used for arbitrary NX (dimension of the space of arguments) and NY (dimension of the function itself). However when you have NY=1 you may find more convenient to use rbfcalc2() or rbfcalc3(). If you want to perform parallel model evaluation from multiple threads, use rbftscalcbuf() with per-thread buffer object. This function returns 0.0 when model is not initialized. INPUT PARAMETERS: S - RBF model X - coordinates, array[NX]. X may have more than NX elements, in this case only leading NX will be used. OUTPUT PARAMETERS: Y - function value, array[NY]. Y is out-parameter and reallocated after call to this function. In case you want to reuse previously allocated Y, you may use RBFCalcBuf(), which reallocates Y only when it is too small. -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfcalc(rbfmodel s, double[] x, ref double[] y, alglib.xparams _params) { y = new double[0]; alglib.ap.assert(alglib.ap.len(x)>=s.nx, "RBFCalc: Length(X)=s.nx, "RBFCalcBuf: Length(X)=s.nx, "RBFCalcBuf: Length(X)0, "RBFGridCalc2: invalid value for N0 (N0<=0)!"); alglib.ap.assert(n1>0, "RBFGridCalc2: invalid value for N1 (N1<=0)!"); alglib.ap.assert(alglib.ap.len(x0)>=n0, "RBFGridCalc2: Length(X0)=n1, "RBFGridCalc2: Length(X1)2 ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. NOTE: Parallel processing is implemented only for modern (hierarchical) RBFs. Legacy version 1 RBFs (created by QNN or RBF-ML) are still processed serially. INPUT PARAMETERS: S - RBF model, used in read-only mode, can be shared between multiple invocations of this function from multiple threads. X0 - array of grid nodes, first coordinates, array[N0]. Must be ordered by ascending. Exception is generated if the array is not correctly ordered. 
N0 - grid size (number of nodes) in the first dimension X1 - array of grid nodes, second coordinates, array[N1] Must be ordered by ascending. Exception is generated if the array is not correctly ordered. N1 - grid size (number of nodes) in the second dimension OUTPUT PARAMETERS: Y - function values, array[NY*N0*N1], where NY is a number of "output" vector values (this function supports vector- valued RBF models). Y is out-variable and is reallocated by this function. Y[K+NY*(I0+I1*N0)]=F_k(X0[I0],X1[I1]), for: * K=0...NY-1 * I0=0...N0-1 * I1=0...N1-1 NOTE: this function supports weakly ordered grid nodes, i.e. you may have X[i]=X[i+1] for some i. It does not provide you any performance benefits due to duplication of points, just convenience and flexibility. NOTE: this function is re-entrant, i.e. you may use same rbfmodel structure in multiple threads calling this function for different grids. NOTE: if you need function values on some subset of regular grid, which may be described as "several compact and dense islands", you may use rbfgridcalc2vsubset(). -- ALGLIB -- Copyright 27.01.2017 by Bochkanov Sergey *************************************************************************/ public static void rbfgridcalc2v(rbfmodel s, double[] x0, int n0, double[] x1, int n1, ref double[] y, alglib.xparams _params) { int i = 0; bool[] dummy = new bool[0]; y = new double[0]; alglib.ap.assert(n0>0, "RBFGridCalc2V: invalid value for N0 (N0<=0)!"); alglib.ap.assert(n1>0, "RBFGridCalc2V: invalid value for N1 (N1<=0)!"); alglib.ap.assert(alglib.ap.len(x0)>=n0, "RBFGridCalc2V: Length(X0)=n1, "RBFGridCalc2V: Length(X1)2 ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. NOTE: Parallel processing is implemented only for modern (hierarchical) RBFs. Legacy version 1 RBFs (created by QNN or RBF-ML) are still processed serially. INPUT PARAMETERS: S - RBF model, used in read-only mode, can be shared between multiple invocations of this function from multiple threads. X0 - array of grid nodes, first coordinates, array[N0]. Must be ordered by ascending. Exception is generated if the array is not correctly ordered. N0 - grid size (number of nodes) in the first dimension X1 - array of grid nodes, second coordinates, array[N1] Must be ordered by ascending. Exception is generated if the array is not correctly ordered. N1 - grid size (number of nodes) in the second dimension FlagY - array[N0*N1]: * Y[I0+I1*N0] corresponds to node (X0[I0],X1[I1]) * it is a "bitmap" array which contains False for nodes which are NOT calculated, and True for nodes which are required. OUTPUT PARAMETERS: Y - function values, array[NY*N0*N1*N2], where NY is a number of "output" vector values (this function supports vector- valued RBF models): * Y[K+NY*(I0+I1*N0)]=F_k(X0[I0],X1[I1]), for K=0...NY-1, I0=0...N0-1, I1=0...N1-1. * elements of Y[] which correspond to FlagY[]=True are loaded by model values (which may be exactly zero for some nodes). * elements of Y[] which correspond to FlagY[]=False MAY be initialized by zeros OR may be calculated. This function processes grid as a hierarchy of nested blocks and micro-rows. 
If just one element of micro-row is required, entire micro-row (up to 8 nodes in the current version, but no promises) is calculated. NOTE: this function supports weakly ordered grid nodes, i.e. you may have X[i]=X[i+1] for some i. It does not provide you any performance benefits due to duplication of points, just convenience and flexibility. NOTE: this function is re-entrant, i.e. you may use same rbfmodel structure in multiple threads calling this function for different grids. -- ALGLIB -- Copyright 04.03.2016 by Bochkanov Sergey *************************************************************************/ public static void rbfgridcalc2vsubset(rbfmodel s, double[] x0, int n0, double[] x1, int n1, bool[] flagy, ref double[] y, alglib.xparams _params) { int i = 0; y = new double[0]; alglib.ap.assert(n0>0, "RBFGridCalc2VSubset: invalid value for N0 (N0<=0)!"); alglib.ap.assert(n1>0, "RBFGridCalc2VSubset: invalid value for N1 (N1<=0)!"); alglib.ap.assert(alglib.ap.len(x0)>=n0, "RBFGridCalc2VSubset: Length(X0)=n1, "RBFGridCalc2VSubset: Length(X1)=n0*n1, "RBFGridCalc2VSubset: Length(FlagY)3 ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. NOTE: Parallel processing is implemented only for modern (hierarchical) RBFs. Legacy version 1 RBFs (created by QNN or RBF-ML) are still processed serially. INPUT PARAMETERS: S - RBF model, used in read-only mode, can be shared between multiple invocations of this function from multiple threads. X0 - array of grid nodes, first coordinates, array[N0]. Must be ordered by ascending. Exception is generated if the array is not correctly ordered. N0 - grid size (number of nodes) in the first dimension X1 - array of grid nodes, second coordinates, array[N1] Must be ordered by ascending. Exception is generated if the array is not correctly ordered. N1 - grid size (number of nodes) in the second dimension X2 - array of grid nodes, third coordinates, array[N2] Must be ordered by ascending. Exception is generated if the array is not correctly ordered. N2 - grid size (number of nodes) in the third dimension OUTPUT PARAMETERS: Y - function values, array[NY*N0*N1*N2], where NY is a number of "output" vector values (this function supports vector- valued RBF models). Y is out-variable and is reallocated by this function. Y[K+NY*(I0+I1*N0+I2*N0*N1)]=F_k(X0[I0],X1[I1],X2[I2]), for: * K=0...NY-1 * I0=0...N0-1 * I1=0...N1-1 * I2=0...N2-1 NOTE: this function supports weakly ordered grid nodes, i.e. you may have X[i]=X[i+1] for some i. It does not provide you any performance benefits due to duplication of points, just convenience and flexibility. NOTE: this function is re-entrant, i.e. you may use same rbfmodel structure in multiple threads calling this function for different grids. NOTE: if you need function values on some subset of regular grid, which may be described as "several compact and dense islands", you may use rbfgridcalc3vsubset(). 
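A short usage sketch which also illustrates the Y[] indexing convention (public alglib.* wrappers assumed; their exact out-parameter convention is an assumption of the sketch):

    // 'model' is an already built model with NX=3, NY=1
    double[] x0 = new double[]{0.0, 1.0};
    double[] x1 = new double[]{0.0, 1.0};
    double[] x2 = new double[]{0.0, 1.0};
    double[] y;
    alglib.rbfgridcalc3v(model, x0, 2, x1, 2, x2, 2, out y);
    // for NY=1 the value at node (I0,I1,I2) is y[I0 + I1*N0 + I2*N0*N1]
    double v = y[0 + 1*2 + 1*2*2];                        // F(x0[0], x1[1], x2[1])
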
-- ALGLIB -- Copyright 04.03.2016 by Bochkanov Sergey *************************************************************************/ public static void rbfgridcalc3v(rbfmodel s, double[] x0, int n0, double[] x1, int n1, double[] x2, int n2, ref double[] y, alglib.xparams _params) { int i = 0; bool[] dummy = new bool[0]; y = new double[0]; alglib.ap.assert(n0>0, "RBFGridCalc3V: invalid value for N0 (N0<=0)!"); alglib.ap.assert(n1>0, "RBFGridCalc3V: invalid value for N1 (N1<=0)!"); alglib.ap.assert(n2>0, "RBFGridCalc3V: invalid value for N2 (N2<=0)!"); alglib.ap.assert(alglib.ap.len(x0)>=n0, "RBFGridCalc3V: Length(X0)=n1, "RBFGridCalc3V: Length(X1)=n2, "RBFGridCalc3V: Length(X2)3 ! COMMERCIAL EDITION OF ALGLIB: ! ! Commercial Edition of ALGLIB includes following important improvements ! of this function: ! * high-performance native backend with same C# interface (C# version) ! * multithreading support (C++ and C# versions) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. NOTE: Parallel processing is implemented only for modern (hierarchical) RBFs. Legacy version 1 RBFs (created by QNN or RBF-ML) are still processed serially. INPUT PARAMETERS: S - RBF model, used in read-only mode, can be shared between multiple invocations of this function from multiple threads. X0 - array of grid nodes, first coordinates, array[N0]. Must be ordered by ascending. Exception is generated if the array is not correctly ordered. N0 - grid size (number of nodes) in the first dimension X1 - array of grid nodes, second coordinates, array[N1] Must be ordered by ascending. Exception is generated if the array is not correctly ordered. N1 - grid size (number of nodes) in the second dimension X2 - array of grid nodes, third coordinates, array[N2] Must be ordered by ascending. Exception is generated if the array is not correctly ordered. N2 - grid size (number of nodes) in the third dimension FlagY - array[N0*N1*N2]: * Y[I0+I1*N0+I2*N0*N1] corresponds to node (X0[I0],X1[I1],X2[I2]) * it is a "bitmap" array which contains False for nodes which are NOT calculated, and True for nodes which are required. OUTPUT PARAMETERS: Y - function values, array[NY*N0*N1*N2], where NY is a number of "output" vector values (this function supports vector- valued RBF models): * Y[K+NY*(I0+I1*N0+I2*N0*N1)]=F_k(X0[I0],X1[I1],X2[I2]), for K=0...NY-1, I0=0...N0-1, I1=0...N1-1, I2=0...N2-1. * elements of Y[] which correspond to FlagY[]=True are loaded by model values (which may be exactly zero for some nodes). * elements of Y[] which correspond to FlagY[]=False MAY be initialized by zeros OR may be calculated. This function processes grid as a hierarchy of nested blocks and micro-rows. If just one element of micro-row is required, entire micro-row (up to 8 nodes in the current version, but no promises) is calculated. NOTE: this function supports weakly ordered grid nodes, i.e. you may have X[i]=X[i+1] for some i. It does not provide you any performance benefits due to duplication of points, just convenience and flexibility. NOTE: this function is re-entrant, i.e. you may use same rbfmodel structure in multiple threads calling this function for different grids. 
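A short sketch of the FlagY convention (public alglib.* wrappers assumed; their exact out-parameter convention is an assumption of the sketch):

    // 'model' is an already built model with NX=3, NY=1;
    // request values at just two nodes of a 2x2x2 grid
    double[] x0 = new double[]{0.0, 1.0};
    double[] x1 = new double[]{0.0, 1.0};
    double[] x2 = new double[]{0.0, 1.0};
    bool[] flagy = new bool[8];                           // N0*N1*N2 flags, initially false
    flagy[0] = true;                                      // node (x0[0],x1[0],x2[0])
    flagy[1 + 1*2 + 1*2*2] = true;                        // node (x0[1],x1[1],x2[1])
    double[] y;
    alglib.rbfgridcalc3vsubset(model, x0, 2, x1, 2, x2, 2, flagy, out y);
    // only y[] entries whose FlagY[] is true are guaranteed to hold model values
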
-- ALGLIB -- Copyright 04.03.2016 by Bochkanov Sergey *************************************************************************/ public static void rbfgridcalc3vsubset(rbfmodel s, double[] x0, int n0, double[] x1, int n1, double[] x2, int n2, bool[] flagy, ref double[] y, alglib.xparams _params) { int i = 0; y = new double[0]; alglib.ap.assert(n0>0, "RBFGridCalc3VSubset: invalid value for N0 (N0<=0)!"); alglib.ap.assert(n1>0, "RBFGridCalc3VSubset: invalid value for N1 (N1<=0)!"); alglib.ap.assert(n2>0, "RBFGridCalc3VSubset: invalid value for N2 (N2<=0)!"); alglib.ap.assert(alglib.ap.len(x0)>=n0, "RBFGridCalc3VSubset: Length(X0)=n1, "RBFGridCalc3VSubset: Length(X1)=n2, "RBFGridCalc3VSubset: Length(X2)=n0*n1*n2, "RBFGridCalc3VSubset: Length(FlagY)0, "RBFGridCalc2VX: invalid value for N0 (N0<=0)!"); alglib.ap.assert(n1>0, "RBFGridCalc2VX: invalid value for N1 (N1<=0)!"); alglib.ap.assert(alglib.ap.len(x0)>=n0, "RBFGridCalc2VX: Length(X0)=n1, "RBFGridCalc2VX: Length(X1)0, "RBFGridCalc3V: invalid value for N0 (N0<=0)!"); alglib.ap.assert(n1>0, "RBFGridCalc3V: invalid value for N1 (N1<=0)!"); alglib.ap.assert(n2>0, "RBFGridCalc3V: invalid value for N2 (N2<=0)!"); alglib.ap.assert(alglib.ap.len(x0)>=n0, "RBFGridCalc3V: Length(X0)=n1, "RBFGridCalc3V: Length(X1)=n2, "RBFGridCalc3V: Length(X2)(double)(blockwidth) || i-blocks0[blockscnt0]>=maxblocksize ) { apserv.inc(ref blockscnt0, _params); blocks0[blockscnt0] = i; } } apserv.inc(ref blockscnt0, _params); blocks0[blockscnt0] = n0; blocks1 = new int[n1+1]; blockscnt1 = 0; blocks1[0] = 0; for(i=1; i<=n1-1; i++) { if( (double)(x1[i]-x1[blocks1[blockscnt1]])>(double)(blockwidth) || i-blocks1[blockscnt1]>=maxblocksize ) { apserv.inc(ref blockscnt1, _params); blocks1[blockscnt1] = i; } } apserv.inc(ref blockscnt1, _params); blocks1[blockscnt1] = n1; blocks2 = new int[n2+1]; blockscnt2 = 0; blocks2[0] = 0; for(i=1; i<=n2-1; i++) { if( (double)(x2[i]-x2[blocks2[blockscnt2]])>(double)(blockwidth) || i-blocks2[blockscnt2]>=maxblocksize ) { apserv.inc(ref blockscnt2, _params); blocks2[blockscnt2] = i; } } apserv.inc(ref blockscnt2, _params); blocks2[blockscnt2] = n2; // // Perform calculation in multithreaded mode // rbfv1.rbfv1gridcalc3vrec(s.model1, x0, n0, x1, n1, x2, n2, blocks0, 0, blockscnt0, blocks1, 0, blockscnt1, blocks2, 0, blockscnt2, flagy, sparsey, searchradius, avgfuncpernode, bufpool, y, _params); // // Done // return; } // // Process V2 model // if( s.modelversion==2 ) { dummyx3 = new double[1]; dummyx3[0] = 0; rbfv2.rbfv2gridcalcvx(s.model2, x0, n0, x1, n1, x2, n2, dummyx3, 1, flagy, sparsey, y, _params); return; } // // Unknown model // alglib.ap.assert(false, "RBFGradCalc3VX: integrity check failed"); } /************************************************************************* This function "unpacks" RBF model by extracting its coefficients. INPUT PARAMETERS: S - RBF model OUTPUT PARAMETERS: NX - dimensionality of argument NY - dimensionality of the target function XWR - model information, array[NC,NX+NY+1]. One row of the array corresponds to one basis function: * first NX columns - coordinates of the center * next NY columns - weights, one per dimension of the function being modelled For ModelVersion=1: * last column - radius, same for all dimensions of the function being modelled For ModelVersion=2: * last NX columns - radii, one per dimension NC - number of the centers V - polynomial term , array[NY,NX+1]. One row per one dimension of the function being modelled. 
First NX elements are linear coefficients, V[NX] is equal to the constant part. ModelVersion-version of the RBF model: * 1 - for models created by QNN and RBF-ML algorithms, compatible with ALGLIB 3.10 or earlier. * 2 - for models created by HierarchicalRBF, requires ALGLIB 3.11 or later -- ALGLIB -- Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ public static void rbfunpack(rbfmodel s, ref int nx, ref int ny, ref double[,] xwr, ref int nc, ref double[,] v, ref int modelversion, alglib.xparams _params) { nx = 0; ny = 0; xwr = new double[0,0]; nc = 0; v = new double[0,0]; modelversion = 0; if( s.modelversion==1 ) { modelversion = 1; rbfv1.rbfv1unpack(s.model1, ref nx, ref ny, ref xwr, ref nc, ref v, _params); return; } if( s.modelversion==2 ) { modelversion = 2; rbfv2.rbfv2unpack(s.model2, ref nx, ref ny, ref xwr, ref nc, ref v, _params); return; } alglib.ap.assert(false, "RBFUnpack: integrity check failure"); } /************************************************************************* This function returns model version. INPUT PARAMETERS: S - RBF model RESULT: * 1 - for models created by QNN and RBF-ML algorithms, compatible with ALGLIB 3.10 or earlier. * 2 - for models created by HierarchicalRBF, requires ALGLIB 3.11 or later -- ALGLIB -- Copyright 06.07.2016 by Bochkanov Sergey *************************************************************************/ public static int rbfgetmodelversion(rbfmodel s, alglib.xparams _params) { int result = 0; result = s.modelversion; return result; } /************************************************************************* This function is used to peek into hierarchical RBF construction process from some other thread and get current progress indicator. It returns value in [0,1]. IMPORTANT: only HRBFs (hierarchical RBFs) support peeking into progress indicator. Legacy RBF-ML and RBF-QNN do not support it. You will always get 0 value. INPUT PARAMETERS: S - RBF model object RESULT: progress value, in [0,1] -- ALGLIB -- Copyright 17.11.2018 by Bochkanov Sergey *************************************************************************/ public static double rbfpeekprogress(rbfmodel s, alglib.xparams _params) { double result = 0; result = (double)s.progress10000/(double)10000; return result; } /************************************************************************* This function is used to submit a request for termination of the hierarchical RBF construction process from some other thread. As result, RBF construction is terminated smoothly (with proper deallocation of all necessary resources) and resultant model is filled by zeros. A rep.terminationtype=8 will be returned upon receiving such request. IMPORTANT: only HRBFs (hierarchical RBFs) support termination requests. Legacy RBF-ML and RBF-QNN do not support it. An attempt to terminate their construction will be ignored. IMPORTANT: termination request flag is cleared when the model construction starts. Thus, any pre-construction termination requests will be silently ignored - only ones submitted AFTER construction has actually began will be handled. 
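A sketch of the intended usage pattern (public alglib.* wrappers assumed; 'cancelRequested' is a hypothetical flag owned by the caller, shown only for illustration):

    // thread A builds the model:
    //     alglib.rbfbuildmodel(model, out rep);
    // thread B monitors the same model object and may cancel construction:
    double progress = alglib.rbfpeekprogress(model);      // value in [0,1]
    if (progress > 0.0 && cancelRequested)
        alglib.rbfrequesttermination(model);              // thread A then gets rep.terminationtype=8
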
INPUT PARAMETERS: S - RBF model object -- ALGLIB -- Copyright 17.11.2018 by Bochkanov Sergey *************************************************************************/ public static void rbfrequesttermination(rbfmodel s, alglib.xparams _params) { s.terminationrequest = true; } /************************************************************************* Serializer: allocation -- ALGLIB -- Copyright 02.02.2012 by Bochkanov Sergey *************************************************************************/ public static void rbfalloc(alglib.serializer s, rbfmodel model, alglib.xparams _params) { // // Header // s.alloc_entry(); // // V1 model // if( model.modelversion==1 ) { // // Header // s.alloc_entry(); rbfv1.rbfv1alloc(s, model.model1, _params); return; } // // V2 model // if( model.modelversion==2 ) { // // Header // s.alloc_entry(); rbfv2.rbfv2alloc(s, model.model2, _params); return; } alglib.ap.assert(false); } /************************************************************************* Serializer: serialization -- ALGLIB -- Copyright 02.02.2012 by Bochkanov Sergey *************************************************************************/ public static void rbfserialize(alglib.serializer s, rbfmodel model, alglib.xparams _params) { // // Header // s.serialize_int(scodes.getrbfserializationcode(_params)); // // V1 model // if( model.modelversion==1 ) { s.serialize_int(rbffirstversion); rbfv1.rbfv1serialize(s, model.model1, _params); return; } // // V2 model // if( model.modelversion==2 ) { // // Header // s.serialize_int(rbfversion2); rbfv2.rbfv2serialize(s, model.model2, _params); return; } alglib.ap.assert(false); } /************************************************************************* Serializer: unserialization -- ALGLIB -- Copyright 02.02.2012 by Bochkanov Sergey *************************************************************************/ public static void rbfunserialize(alglib.serializer s, rbfmodel model, alglib.xparams _params) { int i0 = 0; int i1 = 0; rbfpreparenonserializablefields(model, _params); // // Header // i0 = s.unserialize_int(); alglib.ap.assert(i0==scodes.getrbfserializationcode(_params), "RBFUnserialize: stream header corrupted"); i1 = s.unserialize_int(); alglib.ap.assert(i1==rbffirstversion || i1==rbfversion2, "RBFUnserialize: stream header corrupted"); // // V1 model // if( i1==rbffirstversion ) { rbfv1.rbfv1unserialize(s, model.model1, _params); model.modelversion = 1; model.ny = model.model1.ny; model.nx = model.model1.nx; initializev2(model.nx, model.ny, model.model2, _params); return; } // // V2 model // if( i1==rbfversion2 ) { rbfv2.rbfv2unserialize(s, model.model2, _params); model.modelversion = 2; model.ny = model.model2.ny; model.nx = model.model2.nx; initializev1(model.nx, model.ny, model.model1, _params); return; } alglib.ap.assert(false); } /************************************************************************* Initialize empty model -- ALGLIB -- Copyright 12.05.2016 by Bochkanov Sergey *************************************************************************/ private static void rbfpreparenonserializablefields(rbfmodel s, alglib.xparams _params) { s.n = 0; s.hasscale = false; s.radvalue = 1; s.radzvalue = 5; s.nlayers = 0; s.lambdav = 0; s.aterm = 1; s.algorithmtype = 0; s.epsort = eps; s.epserr = eps; s.maxits = 0; s.nnmaxits = 100; } /************************************************************************* Initialize V1 model (skip initialization for NX=1 or NX>3) -- ALGLIB -- Copyright 12.05.2016 by Bochkanov Sergey 
*************************************************************************/ private static void initializev1(int nx, int ny, rbfv1.rbfv1model s, alglib.xparams _params) { if( nx==2 || nx==3 ) { rbfv1.rbfv1create(nx, ny, s, _params); } } /************************************************************************* Initialize V2 model -- ALGLIB -- Copyright 12.05.2016 by Bochkanov Sergey *************************************************************************/ private static void initializev2(int nx, int ny, rbfv2.rbfv2model s, alglib.xparams _params) { rbfv2.rbfv2create(nx, ny, s, _params); } /************************************************************************* Cleans report fields -- ALGLIB -- Copyright 16.06.2016 by Bochkanov Sergey *************************************************************************/ private static void clearreportfields(rbfreport rep, alglib.xparams _params) { rep.rmserror = Double.NaN; rep.maxerror = Double.NaN; rep.arows = 0; rep.acols = 0; rep.annz = 0; rep.iterationscount = 0; rep.nmv = 0; rep.terminationtype = 0; } } public class intcomp { /************************************************************************* This function is left for backward compatibility. Use fitspheremc() instead. -- ALGLIB -- Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ public static void nsfitspheremcc(double[,] xy, int npoints, int nx, ref double[] cx, ref double rhi, alglib.xparams _params) { double dummy = 0; cx = new double[0]; rhi = 0; nsfitspherex(xy, npoints, nx, 1, 0.0, 0, 0.0, ref cx, ref dummy, ref rhi, _params); } /************************************************************************* This function is left for backward compatibility. Use fitspheremi() instead. -- ALGLIB -- Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ public static void nsfitspheremic(double[,] xy, int npoints, int nx, ref double[] cx, ref double rlo, alglib.xparams _params) { double dummy = 0; cx = new double[0]; rlo = 0; nsfitspherex(xy, npoints, nx, 2, 0.0, 0, 0.0, ref cx, ref rlo, ref dummy, _params); } /************************************************************************* This function is left for backward compatibility. Use fitspheremz() instead. -- ALGLIB -- Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ public static void nsfitspheremzc(double[,] xy, int npoints, int nx, ref double[] cx, ref double rlo, ref double rhi, alglib.xparams _params) { cx = new double[0]; rlo = 0; rhi = 0; nsfitspherex(xy, npoints, nx, 3, 0.0, 0, 0.0, ref cx, ref rlo, ref rhi, _params); } /************************************************************************* This function is left for backward compatibility. Use fitspherex() instead. -- ALGLIB -- Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ public static void nsfitspherex(double[,] xy, int npoints, int nx, int problemtype, double epsx, int aulits, double penalty, ref double[] cx, ref double rlo, ref double rhi, alglib.xparams _params) { cx = new double[0]; rlo = 0; rhi = 0; fitsphere.fitspherex(xy, npoints, nx, problemtype, epsx, aulits, penalty, ref cx, ref rlo, ref rhi, _params); } } }
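/*************************************************************************
Round-trip serialization sketch for RBF models, relating to the rbfalloc,
rbfserialize and rbfunserialize routines above. It assumes the public
alglib.* serialization wrappers generated elsewhere in this package; their
exact signatures are an assumption of the sketch, not a guarantee:

    alglib.rbfmodel model, model2;
    alglib.rbfreport rep;
    string s;
    double[,] xy = new double[,]{{-1,0,2},{+1,0,3}};
    alglib.rbfcreate(2, 1, out model);
    alglib.rbfsetpoints(model, xy);
    alglib.rbfsetalgohierarchical(model, 1.0, 3, 0.0);
    alglib.rbfbuildmodel(model, out rep);
    alglib.rbfserialize(model, out s);        // model -> portable string
    alglib.rbfunserialize(s, out model2);     // string -> independent copy
    // alglib.rbfcalc2(model2, 0.0, 0.0) now returns the same value as for 'model'
*************************************************************************/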